diff --git a/go.mod b/go.mod
index 82639ad83..888c94e82 100644
--- a/go.mod
+++ b/go.mod
@@ -24,7 +24,7 @@ require (
 	github.com/gofrs/uuid v4.2.0+incompatible
 	github.com/golang/mock v1.6.0
 	github.com/golangci/golangci-lint v1.49.0
-	github.com/google/go-cmp v0.5.8
+	github.com/google/go-cmp v0.5.9
 	github.com/jewzaam/go-cosmosdb v0.0.0-20220315232836-282b67c5b234
 	github.com/jongio/azidext/go/azidext v0.4.0
 	github.com/jstemmer/go-junit-report v0.9.1
@@ -38,7 +38,7 @@ require (
 	github.com/sirupsen/logrus v1.9.0
 	github.com/spf13/cobra v1.5.0
 	golang.org/x/crypto v0.14.0
-	golang.org/x/tools v0.6.0
+	golang.org/x/tools v0.7.0
 	gotest.tools/gotestsum v1.6.4
 	k8s.io/api v0.25.1
 	k8s.io/apimachinery v0.25.1
@@ -49,7 +49,8 @@ require (
 	4d63.com/gochecknoglobals v0.1.0 // indirect
-	cloud.google.com/go/compute v1.7.0 // indirect
+	cloud.google.com/go/compute v1.19.1 // indirect
+	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	github.com/AlecAivazis/survey/v2 v2.3.5 // indirect
 	github.com/Antonboom/errname v0.1.7 // indirect
 	github.com/Antonboom/nilnil v0.1.1 // indirect
@@ -90,7 +91,7 @@ require (
 	github.com/breml/bidichk v0.2.3 // indirect
 	github.com/breml/errchkjson v0.3.0 // indirect
 	github.com/butuzov/ireturn v0.1.1 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/charithe/durationcheck v0.0.9 // indirect
 	github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 // indirect
 	github.com/clarketm/json v1.17.1 // indirect
@@ -147,7 +148,7 @@ require (
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
 	github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
 	github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect
@@ -161,8 +162,8 @@ require (
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 	github.com/google/uuid v1.3.0 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
-	github.com/googleapis/gax-go/v2 v2.4.0 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
+	github.com/googleapis/gax-go/v2 v2.7.1 // indirect
 	github.com/gophercloud/gophercloud v0.24.0 // indirect
 	github.com/gophercloud/utils v0.0.0-20220307143606-8e7800759d16 // indirect
 	github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect
@@ -254,7 +255,7 @@ require (
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/polyfloyd/go-errorlint v1.0.2 // indirect
 	github.com/prometheus/client_golang v1.13.0 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/client_model v0.3.0 // indirect
 	github.com/prometheus/common v0.37.0 // indirect
 	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/quasilyte/go-ruleguard v0.3.17 // indirect
@@ -274,15 +275,15 @@ require (
 	github.com/sivchari/tenv v1.7.0 // indirect
 	github.com/sonatard/noctx v0.0.1 // indirect
 	github.com/sourcegraph/go-diff v0.6.1 // indirect
-	github.com/spf13/afero v1.8.2 // indirect
+	github.com/spf13/afero v1.9.2 // indirect
 	github.com/spf13/cast v1.5.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace // indirect
 	github.com/spf13/viper v1.12.0 // indirect
 	github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
 	github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect
-	github.com/stretchr/objx v0.4.0 // indirect
-	github.com/stretchr/testify v1.8.0 // indirect
+	github.com/stretchr/objx v0.5.0 // indirect
+	github.com/stretchr/testify v1.8.3 // indirect
 	github.com/subosito/gotenv v1.4.0 // indirect
 	github.com/sylvia7788/contextcheck v1.0.6 // indirect
 	github.com/tdakkota/asciicheck v0.1.1 // indirect
@@ -301,27 +302,27 @@ require (
 	github.com/yeya24/promlinter v0.2.0 // indirect
 	gitlab.com/bosi/decorder v0.2.3 // indirect
 	go.mongodb.org/mongo-driver v1.9.0 // indirect
-	go.opencensus.io v0.23.0 // indirect
+	go.opencensus.io v0.24.0 // indirect
 	go.uber.org/atomic v1.9.0 // indirect
 	go.uber.org/multierr v1.6.0 // indirect
 	go.uber.org/zap v1.21.0 // indirect
 	go4.org v0.0.0-20201209231011-d4a079459e60 // indirect
-	golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
+	golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 // indirect
 	golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d // indirect
-	golang.org/x/mod v0.8.0 // indirect
+	golang.org/x/mod v0.9.0 // indirect
 	golang.org/x/net v0.17.0 // indirect
-	golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 // indirect
+	golang.org/x/oauth2 v0.7.0 // indirect
 	golang.org/x/sync v0.1.0 // indirect
 	golang.org/x/sys v0.13.0 // indirect
 	golang.org/x/term v0.13.0 // indirect
 	golang.org/x/text v0.13.0 // indirect
-	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
+	golang.org/x/time v0.3.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
-	google.golang.org/api v0.91.0 // indirect
+	google.golang.org/api v0.114.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220808131553-a91ffa7f803e // indirect
-	google.golang.org/grpc v1.48.0 // indirect
-	google.golang.org/protobuf v1.28.1 // indirect
+	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
+	google.golang.org/grpc v1.56.3 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
 	gopkg.in/gcfg.v1 v1.2.3 // indirect
 	gopkg.in/go-playground/validator.v9 v9.31.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
@@ -573,7 +574,8 @@ replace (
 	github.com/uber/athenadriver => github.com/uber/athenadriver v1.1.10
 	github.com/willf/bitset => github.com/bits-and-blooms/bitset v1.2.1
 	google.golang.org/cloud => cloud.google.com/go v0.97.0
-	google.golang.org/grpc => google.golang.org/grpc v1.40.0
+	// CVE-2023-44487 gRPC-Go HTTP/2 Rapid Reset vulnerability https://github.com/grpc/grpc-go/pull/6703
+	google.golang.org/grpc => google.golang.org/grpc v1.56.3
 	k8s.io/klog/v2 => k8s.io/klog/v2 v2.70.1
 	k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1
 	k8s.io/kube-state-metrics => k8s.io/kube-state-metrics v1.9.7
diff --git a/go.sum b/go.sum
index 02aeb160b..d2ca11d32 100644
--- a/go.sum
+++ b/go.sum
@@ -37,31 +37,500 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y
 cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
 cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
 cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
 cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
 cloud.google.com/go
v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= 
+cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution 
v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= 
+cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= 
+cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod 
h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod 
h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub 
v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= 
+cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod 
h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod 
h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod 
h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod 
h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech 
v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -70,12 +539,83 @@ cloud.google.com/go/storage v1.9.0/go.mod h1:m+/etGaqZbylxaNT876QGXqEHp4PR2Rq5GM cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.7.0/go.mod 
h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= 
+cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/AlecAivazis/survey/v2 v2.3.5 h1:A8cYupsAZkjaUmhtTYv3sSqc7LO5mp1XDfqe5E/9wRQ= github.com/AlecAivazis/survey/v2 v2.3.5/go.mod h1:4AuI9b7RjAR+G7v9+C4YSlX/YL3K3cWNXgWXOhllqvI= github.com/AlekSi/gocov-xml v0.0.0-20190121064608-3a14fb1c4737 h1:JZHBkt0GhM+ARQykshqpI49yaWCHQbJonH3XpDTwMZQ= @@ -158,6 +698,7 @@ github.com/IBM/platform-services-go-sdk v0.24.0 h1:E6dYo+0SvJpXoDeP7T+1INWlwqE6k github.com/IBM/platform-services-go-sdk v0.24.0/go.mod h1:kzN2JboXZjsewjZSrfIcxPFDGLFIAhJYFoyb6NFmF20= github.com/IBM/vpc-go-sdk v1.0.1 h1:D2cu4KRsM8Q8bLWz/uxp8m7nzUm33mcgDv1sD0w/E8M= github.com/IBM/vpc-go-sdk v1.0.1/go.mod h1:bhd7r482lV30UJz46r2oRgYGawGEo+TuS41ZLIY65y0= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= @@ -220,7 +761,10 @@ github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4Rq github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559 h1:4SPQljF/GJ8Q+QlCWMWxRBepub4DresnOm4eI2ebFGc= github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/template 
v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -244,15 +788,19 @@ github.com/alvaroloes/enumer v1.1.2/go.mod h1:FxrjvuXoDAx9isTJrv4c+T410zFi0DtXIT github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.15.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -313,6 +861,8 @@ github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkAp github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM= github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/breml/bidichk v0.1.1/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso= github.com/breml/bidichk v0.2.3 h1:qe6ggxpTfA8E75hdjWPZ581sY3a2lnl0IRxLQFelECI= @@ -335,13 +885,16 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod 
h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk= @@ -365,8 +918,14 @@ github.com/cloudflare/cfssl v1.5.0/go.mod h1:sPPkBS5L8l8sRc/IOO1jG51Xb34u+TYhL6P github.com/cloudflare/go-metrics v0.0.0-20151117154305-6a9aea36fb41/go.mod h1:eaZPlJWD+G9wseg1BuRXlHnjntPMrywMsyxf+LTOdP4= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -549,9 +1108,14 @@ github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQm github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.12.0/go.mod 
h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/esimonov/ifshort v1.0.3/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= @@ -583,6 +1147,7 @@ github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phm github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= @@ -626,6 +1191,11 @@ github.com/go-critic/go-critic v0.6.1/go.mod h1:SdNCfU0yF3UBjtaZGw6586/WocupMOJu github.com/go-critic/go-critic v0.6.4 h1:tucuG1pvOyYgpBIrVxw0R6gwO42lNa92Aq3VaDoIs+E= github.com/go-critic/go-critic v0.6.4/go.mod h1:qL5SOlk7NtY6sJPoVCTKDIgzNOxHkkkOCVDyi9wJe1U= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= github.com/go-git/go-billy/v5 v5.1.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= @@ -639,6 +1209,8 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= 
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= github.com/go-log/log v0.0.0-20181211034820-a514cf01a3eb/go.mod h1:4mBwpdRMFLiuXZDCwU2lKQFsoSCo72j3HqBK9d81N2M= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -753,6 +1325,8 @@ github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE github.com/go-openapi/validate v0.20.3/go.mod h1:goDdqVGiigM3jChcrYJxD2joalke3ZXeftD16byIjA4= github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y= github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= @@ -835,6 +1409,7 @@ github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJA github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-yaml v1.8.1/go.mod h1:wS4gNoLalDSJxo/SpngzPQ2BN4uuZVLCmbM4S3vd4+Y= github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= @@ -863,6 +1438,7 @@ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2V github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -895,12 +1471,14 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= @@ -943,6 +1521,7 @@ github.com/google/cel-go v0.12.4/go.mod h1:Av7CU6r6X3YmcHR9GXqVDaEJYfEtSxl6wvIjU github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= @@ -959,8 +1538,9 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.8.0/go.mod h1:wW5v71NHGnQyb4k+gSshjxidrC7lN33MdWEn+Mz9TsI= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -972,6 +1552,7 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -1003,8 +1584,11 @@ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -1013,8 +1597,12 @@ github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pf github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ= @@ -1071,6 +1659,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/grpc-ecosystem/grpc-health-probe 
v0.3.2/go.mod h1:izVOQ4RWbjUR6lm4nn+VLJyQ+FyaiGmprEYgI04Gs7U= github.com/h2non/filetype v1.1.1 h1:xvOwnXKAckvtLWsN398qS9QhlxlnVXBjXBydK2/UFB4= github.com/h2non/filetype v1.1.1/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= @@ -1223,6 +1813,7 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= @@ -1248,6 +1839,7 @@ github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/sqlstruct v0.0.0-20150923205031-648daed35d49/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= github.com/kisom/goutils v1.1.0/go.mod h1:+UBTfd78habUYWFbNWTJNG+jNG/i/lGURakr4A/yNRw= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= @@ -1256,7 +1848,9 @@ github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8 github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1321,6 +1915,9 @@ github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQ github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= github.com/magiconair/properties 
v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= @@ -1380,6 +1977,7 @@ github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= @@ -1403,6 +2001,8 @@ github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WT github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0= github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37/go.mod h1:dYWq+UWoFCDY1TndvFUQuhBbIYmZpjreC8adEAx93zE= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -1657,8 +2257,12 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= @@ -1700,8 +2304,9 @@ github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1: github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -1755,6 +2360,7 @@ github.com/quasilyte/regex/syntax v0.0.0-20200805063351-8f842688393c/go.mod h1:r github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= @@ -1768,13 +2374,16 @@ github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg= github.com/ryancurrah/gomodguard v1.2.4 
h1:CpMSDKan0LtNGGhPrvupAoLeObRFjND8/tU1rEOtBp4= @@ -1865,9 +2474,10 @@ github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= -github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= @@ -1904,8 +2514,9 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -1916,8 +2527,10 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs= github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo= @@ -2048,6 +2661,8 @@ github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPS github.com/yvasiyarov/gorelic v0.0.7/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod 
h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= @@ -2099,8 +2714,9 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= @@ -2113,6 +2729,8 @@ go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -2203,14 +2821,24 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 h1:tnebWN09GYg9OLPss1KXj8txwZc6X6uMr6VFdcGNbHw= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d 
h1:+W8Qf4iJtMGKkyAygcKohjxTk4JPsL9DpzApJ22m5Ic= golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -2239,8 +2867,10 @@ golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2312,6 +2942,7 @@ golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -2323,9 +2954,19 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2353,8 +2994,15 @@ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 h1:+jnHzr9VPj32ykQVai5DNahi9+NSp7yYuCsl5eAQtL0= golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 
h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2370,6 +3018,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2464,6 +3114,7 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2485,6 +3136,7 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2492,6 +3144,7 @@ golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210921065528-437939a70204/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2513,12 +3166,20 @@ golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2528,6 +3189,11 @@ golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4 golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2539,7 +3205,12 @@ golang.org/x/text v0.3.4/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2550,8 +3221,10 @@ golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2660,6 +3333,7 @@ golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -2687,24 +3361,30 @@ golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= gomodules.xyz/jsonpatch/v3 v3.0.1/go.mod h1:CBhndykehEwTOlEfnsfJwvkFQbSN8YZFr9M+cIHAJto= gomodules.xyz/orderedmap v0.1.0/go.mod h1:g9/TPUCm1t2gwD3j3zfV8uylyYhVdCNSi+xCEIu7yTU= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -2748,11 +3428,28 @@ google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/S google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api 
v0.91.0 h1:731+JzuwaJoZXRQGmPoBiV+SrsAfUaIkdMCWTcQNPyA= -google.golang.org/api v0.91.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2854,6 +3551,7 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= @@ -2865,11 +3563,59 @@ google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod 
h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220808131553-a91ffa7f803e h1:yXLYwnRRqNHIwwHZLiA9/aC9uieNrOF0owsijAiZJr8= -google.golang.org/genproto v0.0.0-20220808131553-a91ffa7f803e/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto 
v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v0.0.0-20200709232328-d8193ee9cc3e/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2886,8 +3632,10 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= @@ -2974,6 +3722,7 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA= honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= @@ -3019,6 +3768,40 @@ k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220823124924-e9cbc92d1a73 h1:H9TCJUUx+2VA0ZiD9lvtaX8fthFsMoD+Izn93E/hm8U= k8s.io/utils v0.0.0-20220823124924-e9cbc92d1a73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= 
+modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go new file mode 100644 index 000000000..a5b020992 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. 
+const Version = "1.19.1" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md new file mode 100644 index 000000000..06b957349 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -0,0 +1,19 @@ +# Changes + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) + + +### Bug Fixes + +* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) + + +### Bug Fixes + +* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) + +## [0.1.0] (2022-10-26) + +Initial release of metadata being it's own module. diff --git a/vendor/cloud.google.com/go/compute/metadata/LICENSE b/vendor/cloud.google.com/go/compute/metadata/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/compute/metadata/README.md b/vendor/cloud.google.com/go/compute/metadata/README.md new file mode 100644 index 000000000..f940fb2c8 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/README.md @@ -0,0 +1,27 @@ +# Compute API + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata) + +This is a utility library for communicating with Google Cloud metadata service +on Google Cloud. + +## Install + +```bash +go get cloud.google.com/go/compute/metadata +``` + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. 
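A minimal, illustrative sketch of typical use of this package (it assumes the process runs on GCE and uses only the exported `OnGCE` and `ProjectID` accessors):

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE reports whether this process appears to be running on
	// Google Compute Engine (it probes the metadata server).
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE")
	}

	// ProjectID asks the metadata server for the current project ID.
	projectID, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", projectID)
}
```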
+ +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 1405d0967..c17faa142 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -70,7 +70,9 @@ func newDefaultHTTPClient() *http.Client { Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, + IdleConnTimeout: 60 * time.Second, }, + Timeout: 5 * time.Second, } } @@ -145,7 +147,7 @@ func testOnGCE() bool { go func() { resolver := &net.Resolver{} - addrs, err := resolver.LookupHost(ctx, "metadata.google.internal") + addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") if err != nil || len(addrs) == 0 { resc <- false return diff --git a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go new file mode 100644 index 000000000..4cef48500 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go @@ -0,0 +1,23 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the {{.RootMod}} import, won't actually become part of +// the resultant binary. +//go:build modhack +// +build modhack + +package metadata + +// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "cloud.google.com/go/compute/internal" diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 792b4a60b..8bf0e5b78 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -3,8 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) [![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. 
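Whichever implementation is selected, the exported API listed above is used the same way; a minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice or a string.
	fmt.Println(xxhash.Sum64([]byte("hello, world")))
	fmt.Println(xxhash.Sum64String("hello, world"))

	// Streaming use via Digest, which implements hash.Hash64.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.Write([]byte("world"))
	fmt.Println(d.Sum64())
}
```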
If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility @@ -45,19 +47,20 @@ I recommend using the latest release of Go. Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 000000000..94b9c4439 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index 15c835d54..a9e0d45c9 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -16,19 +16,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -50,10 +42,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. 
- copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. - copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index be8db5bf7..3e8b13257 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -1,215 +1,209 @@ +//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. 
- MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. - SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. 
- MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 000000000..7e3145a22 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. 
+ ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go similarity index 73% rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index ad14b807f..9216e0a40 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -1,3 +1,5 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a82160..26df13bba 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego package xxhash @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7a3..e86f1b5fd 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine // This file contains the safe implementations of 
otherwise unsafe-using code. diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 376e0ca2e..1c1638fd8 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. @@ -11,7 +12,7 @@ import ( // In the future it's possible that compiler optimizations will make these // XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. // If that happens, even if we keep these functions they can be replaced with // the trivial safe code. diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go new file mode 100644 index 000000000..6c16c255f --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -0,0 +1,530 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONUnmarshalV2 = false + +// UnmarshalNext unmarshals the next JSON object from d into m. +func UnmarshalNext(d *json.Decoder, m proto.Message) error { + return new(Unmarshaler).UnmarshalNext(d, m) +} + +// Unmarshal unmarshals a JSON object from r into m. +func Unmarshal(r io.Reader, m proto.Message) error { + return new(Unmarshaler).Unmarshal(r, m) +} + +// UnmarshalString unmarshals a JSON object from s into m. +func UnmarshalString(s string, m proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // AllowUnknownFields specifies whether to allow messages to contain + // unknown JSON fields, as opposed to failing to unmarshal. + AllowUnknownFields bool + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize the way +// they are unmarshaled from JSON. Messages that implement this should also +// implement JSONPBMarshaler so that the custom format can be produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Unmarshal unmarshals a JSON object from r into m. +func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { + return u.UnmarshalNext(json.NewDecoder(r), m) +} + +// UnmarshalNext unmarshals the next JSON object from d into m. 
+func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { + if m == nil { + return errors.New("invalid nil message") + } + + // Parse the next JSON object from the stream. + raw := json.RawMessage{} + if err := d.Decode(&raw); err != nil { + return err + } + + // Check for custom unmarshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsu, ok := m.(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, raw) + } + + mr := proto.MessageReflect(m) + + // NOTE: For historical reasons, a top-level null is treated as a noop. + // This is incorrect, but kept for compatibility. + if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { + return nil + } + + if wrapJSONUnmarshalV2 { + // NOTE: If input message is non-empty, we need to preserve merge semantics + // of the old jsonpb implementation. These semantics are not supported by + // the protobuf JSON specification. + isEmpty := true + mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { + isEmpty = false // at least one iteration implies non-empty + return false + }) + if !isEmpty { + // Perform unmarshaling into a newly allocated, empty message. + mr = mr.New() + + // Use a defer to copy all unmarshaled fields into the original message. + dst := proto.MessageReflect(m) + defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + dst.Set(fd, v) + return true + }) + } + + // Unmarshal using the v2 JSON unmarshaler. + opts := protojson.UnmarshalOptions{ + DiscardUnknown: u.AllowUnknownFields, + } + if u.AnyResolver != nil { + opts.Resolver = anyResolver{u.AnyResolver} + } + return opts.Unmarshal(raw, mr.Interface()) + } else { + if err := u.unmarshalMessage(mr, raw); err != nil { + return err + } + return protoV2.CheckInitialized(mr.Interface()) + } +} + +func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { + md := m.Descriptor() + fds := md.Fields() + + if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, in) + } + + if string(in) == "null" && md.FullName() != "google.protobuf.Value" { + return nil + } + + switch wellKnownType(md.FullName()) { + case "Any": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + rawTypeURL, ok := jsonObject["@type"] + if !ok { + return errors.New("Any JSON doesn't have '@type'") + } + typeURL, err := unquoteString(string(rawTypeURL)) + if err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) + } + m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) + + var m2 protoreflect.Message + if u.AnyResolver != nil { + mi, err := u.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + if err == protoregistry.NotFound { + return fmt.Errorf("could not resolve Any message type: %v", typeURL) + } + return err + } + m2 = mt.New() + } + + if wellKnownType(m2.Descriptor().FullName()) != "" { + rawValue, ok := jsonObject["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + if err := u.unmarshalMessage(m2, rawValue); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } else { + delete(jsonObject, "@type") + rawJSON, err := json.Marshal(jsonObject) + if err != nil { + return fmt.Errorf("can't 
generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + if err = u.unmarshalMessage(m2, rawJSON); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } + + rawWire, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) + return nil + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + v, err := u.unmarshalValue(m.NewField(fd), in, fd) + if err != nil { + return err + } + m.Set(fd, v) + return nil + case "Duration": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + d, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + sec := d.Nanoseconds() / 1e9 + nsec := d.Nanoseconds() % 1e9 + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Timestamp": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + sec := t.Unix() + nsec := t.Nanosecond() + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Value": + switch { + case string(in) == "null": + m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) + case string(in) == "true": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) + case string(in) == "false": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) + case hasPrefixAndSuffix('"', in, '"'): + s, err := unquoteString(string(in)) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) + case hasPrefixAndSuffix('[', in, ']'): + v := m.Mutable(fds.ByNumber(6)) + return u.unmarshalMessage(v.Message(), in) + case hasPrefixAndSuffix('{', in, '}'): + v := m.Mutable(fds.ByNumber(5)) + return u.unmarshalMessage(v.Message(), in) + default: + f, err := strconv.ParseFloat(string(in), 0) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) + } + return nil + case "ListValue": + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + lv := m.Mutable(fds.ByNumber(1)).List() + for _, raw := range jsonArray { + ve := lv.NewElement() + if err := u.unmarshalMessage(ve.Message(), raw); err != nil { + return err + } + lv.Append(ve) + } + return nil + case "Struct": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + mv := m.Mutable(fds.ByNumber(1)).Map() + for key, raw := range jsonObject { + kv := protoreflect.ValueOf(key).MapKey() + vv := mv.NewValue() + if err := u.unmarshalMessage(vv.Message(), raw); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) + } + mv.Set(kv, vv) + } + return nil + } + + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + // Handle known fields. 
+ for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if fd.IsWeak() && fd.Message().IsPlaceholder() { + continue // weak reference is not linked in + } + + // Search for any raw JSON value associated with this field. + var raw json.RawMessage + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + name = string(fd.JSONName()) + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + // Handle extension fields. + for name, raw := range jsonObject { + if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { + continue + } + + // Resolve the extension field by name. + xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(md) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + continue + } + delete(jsonObject, name) + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + if !u.AllowUnknownFields && len(jsonObject) > 0 { + for name := range jsonObject { + return fmt.Errorf("unknown field %q in %v", name, md.FullName()) + } + } + return nil +} + +func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if fd.Cardinality() == protoreflect.Repeated { + return false + } + if md := fd.Message(); md != nil { + return md.FullName() == "google.protobuf.Value" + } + if ed := fd.Enum(); ed != nil { + return ed.FullName() == "google.protobuf.NullValue" + } + return false +} + +func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { + if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { + _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) + return ok + } + return false +} + +func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch { + case fd.IsList(): + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return v, err + } + lv := v.List() + for _, raw := range jsonArray { + ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) + if err != nil { + return v, err + } + lv.Append(ve) + } + return v, nil + case fd.IsMap(): + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return v, err + } + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + for key, raw := range jsonObject { + var kv protoreflect.MapKey + if kfd.Kind() == protoreflect.StringKind { + kv = protoreflect.ValueOf(key).MapKey() + } else { + v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), 
kfd) + if err != nil { + return v, err + } + kv = v.MapKey() + } + + vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) + if err != nil { + return v, err + } + mv.Set(kv, vv) + } + return v, nil + default: + return u.unmarshalSingularValue(v, in, fd) + } +} + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(+1), + `"-Infinity"`: math.Inf(-1), +} + +func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + return unmarshalValue(in, new(bool)) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return unmarshalValue(trimQuote(in), new(int32)) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return unmarshalValue(trimQuote(in), new(int64)) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return unmarshalValue(trimQuote(in), new(uint32)) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return unmarshalValue(trimQuote(in), new(uint64)) + case protoreflect.FloatKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat32(float32(f)), nil + } + return unmarshalValue(trimQuote(in), new(float32)) + case protoreflect.DoubleKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat64(float64(f)), nil + } + return unmarshalValue(trimQuote(in), new(float64)) + case protoreflect.StringKind: + return unmarshalValue(in, new(string)) + case protoreflect.BytesKind: + return unmarshalValue(in, new([]byte)) + case protoreflect.EnumKind: + if hasPrefixAndSuffix('"', in, '"') { + vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) + if vd == nil { + return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) + } + return protoreflect.ValueOfEnum(vd.Number()), nil + } + return unmarshalValue(in, new(protoreflect.EnumNumber)) + case protoreflect.MessageKind, protoreflect.GroupKind: + err := u.unmarshalMessage(v.Message(), in) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } +} + +func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { + err := json.Unmarshal(in, v) + return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err +} + +func unquoteString(in string) (out string, err error) { + err = json.Unmarshal([]byte(in), &out) + return out, err +} + +func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { + if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { + return true + } + return false +} + +// trimQuote is like unquoteString but simply strips surrounding quotes. +// This is incorrect, but is behavior done by the legacy implementation. +func trimQuote(in []byte) []byte { + if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { + in = in[1 : len(in)-1] + } + return in +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go new file mode 100644 index 000000000..685c80a62 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -0,0 +1,559 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONMarshalV2 = false + +// Marshaler is a configurable object for marshaling protocol buffer messages +// to the specified JSON representation. +type Marshaler struct { + // OrigName specifies whether to use the original protobuf name for fields. + OrigName bool + + // EnumsAsInts specifies whether to render enum values as integers, + // as opposed to string values. + EnumsAsInts bool + + // EmitDefaults specifies whether to render fields with zero values. + EmitDefaults bool + + // Indent controls whether the output is compact or not. + // If empty, the output is compact JSON. Otherwise, every JSON object + // entry and JSON array value will be on its own line. + // Each line will be preceded by repeated copies of Indent, where the + // number of copies is the current indentation depth. + Indent string + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should also +// implement JSONPBUnmarshaler so that the custom format can be parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// Marshal serializes a protobuf message as JSON into w. +func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { + b, err := jm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// MarshalToString serializes a protobuf message as JSON in string form. +func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { + b, err := jm.marshal(m) + if err != nil { + return "", err + } + return string(b), nil +} + +func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { + v := reflect.ValueOf(m) + if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, errors.New("Marshal called with nil") + } + + // Check for custom marshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsm, ok := m.(JSONPBMarshaler); ok { + return jsm.MarshalJSONPB(jm) + } + + if wrapJSONMarshalV2 { + opts := protojson.MarshalOptions{ + UseProtoNames: jm.OrigName, + UseEnumNumbers: jm.EnumsAsInts, + EmitUnpopulated: jm.EmitDefaults, + Indent: jm.Indent, + } + if jm.AnyResolver != nil { + opts.Resolver = anyResolver{jm.AnyResolver} + } + return opts.Marshal(proto.MessageReflect(m).Interface()) + } else { + // Check for unpopulated required fields first. + m2 := proto.MessageReflect(m) + if err := protoV2.CheckInitialized(m2.Interface()); err != nil { + return nil, err + } + + w := jsonWriter{Marshaler: jm} + err := w.marshalMessage(m2, "", "") + return w.buf, err + } +} + +type jsonWriter struct { + *Marshaler + buf []byte +} + +func (w *jsonWriter) write(s string) { + w.buf = append(w.buf, s...) 
+} + +func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { + if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(w.Marshaler) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + w.write(string(b)) + return nil + } + + md := m.Descriptor() + fds := md.Fields() + + // Handle well-known types. + const secondInNanos = int64(time.Second / time.Nanosecond) + switch wellKnownType(md.FullName()) { + case "Any": + return w.marshalAny(m, indent) + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + case "Duration": + const maxSecondsInDuration = 315576000000 + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + var sign string + if s < 0 || ns < 0 { + sign, s, ns = "-", -1*s, -1*ns + } + x := fmt.Sprintf("%s%d.%09d", sign, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vs"`, x)) + return nil + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vZ"`, x)) + return nil + case "Value": + // JSON value; which is a null, number, string, bool, object, or array. + od := md.Oneofs().Get(0) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("nil Value") + } + return w.marshalValue(fd, m.Get(fd), indent) + case "Struct", "ListValue": + // JSON object or array. 
+ fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + + firstField := true + if typeURL != "" { + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + if fd == nil { + continue + } + } else { + i++ + } + + v := m.Get(fd) + + if !m.Has(fd) { + if !w.EmitDefaults || fd.ContainingOneof() != nil { + continue + } + if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { + v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars + } + } + + if !firstField { + w.writeComma() + } + if err := w.marshalField(fd, v, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if md.ExtensionRanges().Len() > 0 { + // Collect a sorted list of all extension descriptor and values. + type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + if !firstField { + w.writeComma() + } + if err := w.marshalField(ext.desc, ext.val, indent); err != nil { + return err + } + firstField = false + } + } + + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) writeComma() { + if w.Indent != "" { + w.write(",\n") + } else { + w.write(",") + } +} + +func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." 
+ md := m.Descriptor() + typeURL := m.Get(md.Fields().ByNumber(1)).String() + rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() + + var m2 protoreflect.Message + if w.AnyResolver != nil { + mi, err := w.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + return err + } + m2 = mt.New() + } + + if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { + return err + } + + if wellKnownType(m2.Descriptor().FullName()) == "" { + return w.marshalMessage(m2, indent, typeURL) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + w.writeComma() + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + w.write(`"value": `) + } else { + w.write(`"value":`) + } + if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { + return err + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"@type":`) + if w.Indent != "" { + w.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + w.write(string(b)) + return nil +} + +// marshalField writes field description and value to the Writer. +func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"`) + switch { + case fd.IsExtension(): + // For message set, use the fname of the message as the extension name. + name := string(fd.FullName()) + if isMessageSet(fd.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + w.write("[" + name + "]") + case w.OrigName: + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + w.write(name) + default: + w.write(string(fd.JSONName())) + } + w.write(`":`) + if w.Indent != "" { + w.write(" ") + } + return w.marshalValue(fd, v, indent) +} + +func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case fd.IsList(): + w.write("[") + comma := "" + lv := v.List() + for i := 0; i < lv.Len(); i++ { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write("]") + return nil + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + + // Collect a sorted list of all map keys and values. 
+ type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + + w.write(`{`) + comma := "" + for _, entry := range entries { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + + s := fmt.Sprint(entry.key.Interface()) + b, err := json.Marshal(s) + if err != nil { + return err + } + w.write(string(b)) + + w.write(`:`) + if w.Indent != "" { + w.write(` `) + } + + if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write(`}`) + return nil + default: + return w.marshalSingularValue(fd, v, indent) + } +} + +func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case !v.IsValid(): + w.write("null") + return nil + case fd.Message() != nil: + return w.marshalMessage(v.Message(), indent+w.Indent, "") + case fd.Enum() != nil: + if fd.Enum().FullName() == "google.protobuf.NullValue" { + w.write("null") + return nil + } + + vd := fd.Enum().Values().ByNumber(v.Enum()) + if vd == nil || w.EnumsAsInts { + w.write(strconv.Itoa(int(v.Enum()))) + } else { + w.write(`"` + string(vd.Name()) + `"`) + } + return nil + default: + switch v.Interface().(type) { + case float32, float64: + switch { + case math.IsInf(v.Float(), +1): + w.write(`"Infinity"`) + return nil + case math.IsInf(v.Float(), -1): + w.write(`"-Infinity"`) + return nil + case math.IsNaN(v.Float()): + w.write(`"NaN"`) + return nil + } + case int64, uint64: + w.write(fmt.Sprintf(`"%d"`, v.Interface())) + return nil + } + + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + w.write(string(b)) + return nil + } +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go new file mode 100644 index 000000000..480e2448d --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/json.go @@ -0,0 +1,69 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonpb provides functionality to marshal and unmarshal between a +// protocol buffer message and JSON. It follows the specification at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// Do not rely on the default behavior of the standard encoding/json package +// when called on generated message types as it does not operate correctly. +// +// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" +// package instead. 
+package jsonpb + +import ( + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// AnyResolver takes a type URL, present in an Any message, +// and resolves it into an instance of the associated message. +type AnyResolver interface { + Resolve(typeURL string) (proto.Message, error) +} + +type anyResolver struct{ AnyResolver } + +func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + return r.FindMessageByURL(string(message)) +} + +func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + m, err := r.Resolve(url) + if err != nil { + return nil, err + } + return protoimpl.X.MessageTypeOf(m), nil +} + +func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +func wellKnownType(s protoreflect.FullName) string { + if s.Parent() == "google.protobuf" { + switch s.Name() { + case "Empty", "Any", + "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue", + "Duration", "Timestamp", + "NullValue", "Struct", "Value", "ListValue": + return string(s.Name()) + } + } + return "" +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index fd2b3a42b..087320da7 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -13,21 +13,21 @@ // // The primary features of cmp are: // -// • When the default behavior of equality does not suit the needs of the test, -// custom equality functions can override the equality operation. -// For example, an equality function may report floats as equal so long as they -// are within some tolerance of each other. +// - When the default behavior of equality does not suit the test's needs, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as +// they are within some tolerance of each other. // -// • Types that have an Equal method may use that method to determine equality. -// This allows package authors to determine the equality operation for the types -// that they define. +// - Types with an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation +// for the types that they define. // -// • If no custom equality functions are used and no Equal method is defined, -// equality is determined by recursively comparing the primitive kinds on both -// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported -// fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly -// compared using the Exporter option. 
+// - If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on +// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, +// unexported fields are not compared by default; they result in panics +// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) +// or explicitly compared using the Exporter option. package cmp import ( @@ -45,25 +45,25 @@ import ( // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // -// • Let S be the set of all Ignore, Transformer, and Comparer options that -// remain after applying all path filters, value filters, and type filters. -// If at least one Ignore exists in S, then the comparison is ignored. -// If the number of Transformer and Comparer options in S is greater than one, -// then Equal panics because it is ambiguous which option to use. -// If S contains a single Transformer, then use that to transform the current -// values and recursively call Equal on the output values. -// If S contains a single Comparer, then use that to compare the current values. -// Otherwise, evaluation proceeds to the next rule. +// - Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is non-zero, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform +// the current values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. // -// • If the values have an Equal method of the form "(T) Equal(T) bool" or -// "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and -// evaluation proceeds to the next rule. +// - If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. // -// • Lastly, try to compare x and y based on their basic kinds. -// Simple kinds like booleans, integers, floats, complex numbers, strings, and -// channels are compared using the equivalent of the == operator in Go. -// Functions are only equal if they are both nil, otherwise they are unequal. +// - Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, +// and channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. // // Structs are equal if recursively calling Equal on all fields report equal. // If a struct contains unexported fields, Equal panics unless an Ignore option @@ -144,7 +144,7 @@ func rootStep(x, y interface{}) PathStep { // so that they have the same parent type. 
var t reflect.Type if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { - t = reflect.TypeOf((*interface{})(nil)).Elem() + t = anyType if vx.IsValid() { vvx := reflect.New(t).Elem() vvx.Set(vx) @@ -639,7 +639,9 @@ type dynChecker struct{ curr, next int } // Next increments the state and reports whether a check should be performed. // // Checks occur every Nth function call, where N is a triangular number: +// // 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// // See https://en.wikipedia.org/wiki/Triangular_number // // This sequence ensures that the cost of checks drops significantly as diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index bc196b16c..a248e5436 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -127,9 +127,9 @@ var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 // This function returns an edit-script, which is a sequence of operations // needed to convert one list into the other. The following invariants for // the edit-script are maintained: -// • eq == (es.Dist()==0) -// • nx == es.LenX() -// • ny == es.LenY() +// - eq == (es.Dist()==0) +// - nx == es.LenX() +// - ny == es.LenY() // // This algorithm is not guaranteed to be an optimal solution (i.e., one that // produces an edit-script with a minimal Levenshtein distance). This algorithm @@ -169,12 +169,13 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A diagonal edge is equivalent to a matching symbol between both X and Y. // Invariants: - // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx - // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny // // In general: - // • fwdFrontier.X < revFrontier.X - // • fwdFrontier.Y < revFrontier.Y + // - fwdFrontier.X < revFrontier.X + // - fwdFrontier.Y < revFrontier.Y + // // Unless, it is time for the algorithm to terminate. fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} revPath := path{-1, point{nx, ny}, make(EditScript, 0)} @@ -195,19 +196,21 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // computing sub-optimal edit-scripts between two lists. // // The algorithm is approximately as follows: - // • Searching for differences switches back-and-forth between - // a search that starts at the beginning (the top-left corner), and - // a search that starts at the end (the bottom-right corner). The goal of - // the search is connect with the search from the opposite corner. - // • As we search, we build a path in a greedy manner, where the first - // match seen is added to the path (this is sub-optimal, but provides a - // decent result in practice). When matches are found, we try the next pair - // of symbols in the lists and follow all matches as far as possible. - // • When searching for matches, we search along a diagonal going through - // through the "frontier" point. If no matches are found, we advance the - // frontier towards the opposite corner. - // • This algorithm terminates when either the X coordinates or the - // Y coordinates of the forward and reverse frontier points ever intersect. 
+ // - Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). + // The goal of the search is connect with the search + // from the opposite corner. + // - As we search, we build a path in a greedy manner, + // where the first match seen is added to the path (this is sub-optimal, + // but provides a decent result in practice). When matches are found, + // we try the next pair of symbols in the lists and follow all matches + // as far as possible. + // - When searching for matches, we search along a diagonal going through + // through the "frontier" point. If no matches are found, + // we advance the frontier towards the opposite corner. + // - This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. // This algorithm is correct even if searching only in the forward direction // or in the reverse direction. We do both because it is commonly observed @@ -389,6 +392,7 @@ type point struct{ X, Y int } func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } // zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// // [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] func zigzag(x int) int { if x&1 != 0 { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go deleted file mode 100644 index 9147a2997..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package value - -import ( - "math" - "reflect" -) - -// IsZero reports whether v is the zero value. -// This does not rely on Interface and so can be used on unexported fields. 
-func IsZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 - case reflect.String: - return v.String() == "" - case reflect.UnsafePointer: - return v.Pointer() == 0 - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !IsZero(v.Index(i)) { - return false - } - } - return true - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !IsZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index e57b9eb53..1f9ca9c48 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -33,6 +33,7 @@ type Option interface { } // applicableOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Grouping: Options type applicableOption interface { @@ -43,6 +44,7 @@ type applicableOption interface { } // coreOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Filters: *pathFilter | *valuesFilter type coreOption interface { @@ -336,9 +338,9 @@ func (tr transformer) String() string { // both implement T. // // The equality function must be: -// • Symmetric: equal(x, y) == equal(y, x) -// • Deterministic: equal(x, y) == equal(x, y) -// • Pure: equal(x, y) does not modify x or y +// - Symmetric: equal(x, y) == equal(y, x) +// - Deterministic: equal(x, y) == equal(x, y) +// - Pure: equal(x, y) does not modify x or y func Comparer(f interface{}) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.Equal) || v.IsNil() { @@ -430,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option { } // Result represents the comparison result for a single node and -// is provided by cmp when calling Result (see Reporter). +// is provided by cmp when calling Report (see Reporter). type Result struct { _ [0]func() // Make Result incomparable flags resultFlags diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index c71003463..a0a588502 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -41,13 +41,13 @@ type PathStep interface { // The type of each valid value is guaranteed to be identical to Type. // // In some cases, one or both may be invalid or have restrictions: - // • For StructField, both are not interface-able if the current field - // is unexported and the struct type is not explicitly permitted by - // an Exporter to traverse unexported fields. - // • For SliceIndex, one may be invalid if an element is missing from - // either the x or y slice. - // • For MapIndex, one may be invalid if an entry is missing from - // either the x or y map. 
+ // - For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // an Exporter to traverse unexported fields. + // - For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // - For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. // // The provided values must not be mutated. Values() (vx, vy reflect.Value) @@ -94,6 +94,7 @@ func (pa Path) Index(i int) PathStep { // The simplified path only contains struct field accesses. // // For example: +// // MyMap.MySlices.MyField func (pa Path) String() string { var ss []string @@ -108,6 +109,7 @@ func (pa Path) String() string { // GoString returns the path to a specific node using Go syntax. // // For example: +// // (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField func (pa Path) GoString() string { var ssPre, ssPost []string @@ -159,7 +161,7 @@ func (ps pathStep) String() string { if ps.typ == nil { return "" } - s := ps.typ.String() + s := value.TypeString(ps.typ, false) if s == "" || strings.ContainsAny(s, "{}\n") { return "root" // Type too simple or complex to print } @@ -282,7 +284,7 @@ type typeAssertion struct { func (ta TypeAssertion) Type() reflect.Type { return ta.typ } func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } -func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } // Transform is a transformation from the parent type to the current type. type Transform struct{ *transform } diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 1ef65ac1d..2050bf6b4 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -7,8 +7,6 @@ package cmp import ( "fmt" "reflect" - - "github.com/google/go-cmp/cmp/internal/value" ) // numContextRecords is the number of surrounding equal records to print. @@ -117,7 +115,7 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out // For leaf nodes, format the value based on the reflect.Values alone. // As a special case, treat equal []byte as a leaf nodes. 
- isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0)) + isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0 if v.MaxDepth == 0 || isEqualBytes { switch opts.DiffMode { @@ -248,11 +246,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, pt var isZero bool switch opts.DiffMode { case diffIdentical: - isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero() case diffRemoved: - isZero = value.IsZero(r.Value.ValueX) + isZero = r.Value.ValueX.IsZero() case diffInserted: - isZero = value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueY.IsZero() } if isZero { continue diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 287b89358..2ab41fad3 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -16,6 +16,13 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) +var ( + anyType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() + bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() + byteType = reflect.TypeOf((*byte)(nil)).Elem() +) + type formatValueOptions struct { // AvoidStringer controls whether to avoid calling custom stringer // methods like error.Error or fmt.Stringer.String. @@ -184,7 +191,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) - if value.IsZero(vv) { + if vv.IsZero() { continue // Elide fields with zero values } if len(list) == maxLen { @@ -205,7 +212,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Check whether this is a []byte of text data. - if t.Elem() == reflect.TypeOf(byte(0)) { + if t.Elem() == byteType { b := v.Bytes() isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 68b5c1ae1..23e444f62 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -104,7 +104,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() isString = true - case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + case t.Kind() == reflect.Slice && t.Elem() == byteType: sx, sy = string(vx.Bytes()), string(vy.Bytes()) isString = true case t.Kind() == reflect.Array: @@ -147,7 +147,10 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }) efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) - isPureLinedText = efficiencyLines < 4*efficiencyBytes + quotedLength := len(strconv.Quote(sx + sy)) + unquotedLength := len(sx) + len(sy) + escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength) + isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1 } } @@ -171,12 +174,13 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // differences in a string literal. 
This format is more readable, // but has edge-cases where differences are visually indistinguishable. // This format is avoided under the following conditions: - // • A line starts with `"""` - // • A line starts with "..." - // • A line contains non-printable characters - // • Adjacent different lines differ only by whitespace + // - A line starts with `"""` + // - A line starts with "..." + // - A line contains non-printable characters + // - Adjacent different lines differ only by whitespace // // For example: + // // """ // ... // 3 identical lines // foo @@ -231,7 +235,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} switch t.Kind() { case reflect.String: - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: @@ -326,12 +330,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Kind() { case reflect.String: out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf([]byte(nil)) { + if t != bytesType { out = opts.FormatType(t, out) } } @@ -446,7 +450,6 @@ func (opts formatOptions) formatDiffSlice( // {NumIdentical: 3}, // {NumInserted: 1}, // ] -// func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { var prevMode byte lastStats := func(mode byte) *diffStats { @@ -503,7 +506,6 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) // {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, // {NumIdentical: 63}, // ] -// func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { groups, groupsOrig := groups[:0], groups for i, ds := range groupsOrig { @@ -548,7 +550,6 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat // {NumRemoved: 9}, // {NumIdentical: 64}, // incremented by 10 // ] -// func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { var ix, iy int // indexes into sequence x and y for i, ds := range groups { diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 0fd46d7ff..388fcf571 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -393,6 +393,7 @@ func (s diffStats) Append(ds diffStats) diffStats { // String prints a humanly-readable summary of coalesced records. // // Example: +// // diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" func (s diffStats) String() string { var ss []string diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go index 81f54d5ef..b3283b815 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go @@ -1,16 +1,28 @@ // Copyright 2022 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). // -// Client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). // The signer binary is OS-specific, but exposes a standard set of APIs for the client to use. package client import ( "crypto" + "crypto/ecdsa" "crypto/rsa" "crypto/x509" "encoding/gob" + "errors" "fmt" "io" "net/rpc" @@ -67,14 +79,16 @@ func (k *Key) CertificateChain() [][]byte { // Close closes the RPC connection and kills the signer subprocess. // Call this to free up resources when the Key object is no longer needed. func (k *Key) Close() error { - if err := k.client.Close(); err != nil { - return fmt.Errorf("failed to close RPC connection: %w", err) - } if err := k.cmd.Process.Kill(); err != nil { return fmt.Errorf("failed to kill signer process: %w", err) } - if err := k.cmd.Wait(); err.Error() != "signal: killed" { - return fmt.Errorf("signer process was not killed: %w", err) + // Wait for cmd to exit and release resources. Since the process is forcefully killed, this + // will return a non-nil error (varies by OS), which we will ignore. + _ = k.cmd.Wait() + // The Pipes connecting the RPC client should have been closed when the signer subprocess was killed. + // Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault. + if err := k.client.Close(); err.Error() != "close |0: file already closed" { + return fmt.Errorf("failed to close RPC connection: %w", err) } return nil } @@ -84,12 +98,19 @@ func (k *Key) Public() crypto.PublicKey { return k.publicKey } -// Sign signs a message by encrypting a message digest, using the specified signer options. +// Sign signs a message digest, using the specified signer options. func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) { + if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() { + return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size()) + } err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed) return } +// ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable, +// possibly due to missing config or missing binary path. +var ErrCredUnavailable = errors.New("Cred is unavailable") + // Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate // related operations, including signing messages with the private key. 
// @@ -103,6 +124,9 @@ func Cred(configFilePath string) (*Key, error) { } enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath) if err != nil { + if errors.Is(err, util.ErrConfigUnavailable) { + return nil, ErrCredUnavailable + } return nil, err } k := &Key{ @@ -147,5 +171,15 @@ func Cred(configFilePath string) (*Key, error) { return nil, fmt.Errorf("invalid public key type: %T", publicKey) } + switch pub := k.publicKey.(type) { + case *rsa.PublicKey: + if pub.Size() < 256 { + return nil, fmt.Errorf("RSA modulus size is less than 2048 bits: %v", pub.Size()*8) + } + case *ecdsa.PublicKey: + default: + return nil, fmt.Errorf("unsupported public key type: %v", pub) + } + return k, nil } diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go index 6b5f2806e..1640ec1c9 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go @@ -1,17 +1,30 @@ +// Copyright 2022 Google LLC. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Package util provides helper functions for the client. package util import ( "encoding/json" "errors" - "io/ioutil" + "io" "os" "os/user" "path/filepath" "runtime" ) -const configFileName = "enterprise_certificate_config.json" +const configFileName = "certificate_config.json" // EnterpriseCertificateConfig contains parameters for initializing signer. type EnterpriseCertificateConfig struct { @@ -20,17 +33,24 @@ type EnterpriseCertificateConfig struct { // Libs specifies the locations of helper libraries. type Libs struct { - SignerBinary string `json:"signer_binary"` + ECP string `json:"ecp"` } +// ErrConfigUnavailable is a sentinel error that indicates ECP config is unavailable, +// possibly due to entire config missing or missing binary path. +var ErrConfigUnavailable = errors.New("Config is unavailable") + // LoadSignerBinaryPath retrieves the path of the signer binary from the config file. 
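Taken together, the client.go changes above give callers a detectable failure mode (ErrCredUnavailable) and stricter Sign input validation. Below is a minimal, illustrative sketch of a caller honoring both; the fallback comment and package framing are assumptions, while client.Cred, client.ErrCredUnavailable, Key.Close and Key.Sign come from the vendored code above.

package main

import (
	"crypto"
	"crypto/sha256"
	"errors"

	"github.com/googleapis/enterprise-certificate-proxy/client"
)

func signWithECP(configPath string, msg []byte) ([]byte, error) {
	key, err := client.Cred(configPath)
	if errors.Is(err, client.ErrCredUnavailable) {
		// No ECP config or signer binary path: a caller could fall back to
		// default TLS credentials here instead of failing hard.
		return nil, err
	}
	if err != nil {
		return nil, err
	}
	defer key.Close()

	// Sign now rejects digests whose length does not match opts.HashFunc().Size().
	digest := sha256.Sum256(msg)
	return key.Sign(nil, digest[:], crypto.SHA256)
}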
func LoadSignerBinaryPath(configFilePath string) (path string, err error) { jsonFile, err := os.Open(configFilePath) if err != nil { + if errors.Is(err, os.ErrNotExist) { + return "", ErrConfigUnavailable + } return "", err } - byteValue, err := ioutil.ReadAll(jsonFile) + byteValue, err := io.ReadAll(jsonFile) if err != nil { return "", err } @@ -39,9 +59,9 @@ func LoadSignerBinaryPath(configFilePath string) (path string, err error) { if err != nil { return "", err } - signerBinaryPath := config.Libs.SignerBinary + signerBinaryPath := config.Libs.ECP if signerBinaryPath == "" { - return "", errors.New("Signer binary path is missing.") + return "", ErrConfigUnavailable } return signerBinaryPath, nil } @@ -61,9 +81,8 @@ func guessHomeDir() string { func getDefaultConfigFileDirectory() (directory string) { if runtime.GOOS == "windows" { return filepath.Join(os.Getenv("APPDATA"), "gcloud") - } else { - return filepath.Join(guessHomeDir(), ".config/gcloud") } + return filepath.Join(guessHomeDir(), ".config/gcloud") } // GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gCloud. diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 0e643a05b..10295639c 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.4.0" + "v2": "2.7.1" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index b42ace44c..41a7ca94d 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,41 @@ # Changelog +## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06) + + +### Bug Fixes + +* **v2/apierror:** return Unknown GRPCStatus when err source is HTTP ([#260](https://github.com/googleapis/gax-go/issues/260)) ([043b734](https://github.com/googleapis/gax-go/commit/043b73437a240a91229207fb3ee52a9935a36f23)), refs [#254](https://github.com/googleapis/gax-go/issues/254) + +## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) + + +### Features + +* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6)) +* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f)) + +## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13) + + +### Features + +* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) ([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3)) + +## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04) + + +### Bug Fixes + +* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) ([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23)) + +## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04) + + +### Features + +* add ExtractProtoMessage to apierror 
([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb)) + ## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index 7d0128a0c..ed862c8b3 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -39,8 +39,10 @@ import ( jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto" "google.golang.org/api/googleapi" "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) // ErrDetails holds the google/rpc/error_details.proto messages. @@ -60,6 +62,30 @@ type ErrDetails struct { Unknown []interface{} } +// ErrMessageNotFound is used to signal ExtractProtoMessage found no matching messages. +var ErrMessageNotFound = errors.New("message not found") + +// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the +// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type, +// the content of the message is copied to the provided message. +// +// ExtractProtoMessage will return ErrMessageNotFound if there are no message matching the +// protocol buffer type of the provided message. +func (e ErrDetails) ExtractProtoMessage(v proto.Message) error { + if v == nil { + return ErrMessageNotFound + } + for _, elem := range e.Unknown { + if elemProto, ok := elem.(proto.Message); ok { + if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() { + proto.Merge(v, elemProto) + return nil + } + } + } + return ErrMessageNotFound +} + func (e ErrDetails) String() string { var d strings.Builder if e.ErrorInfo != nil { @@ -172,12 +198,12 @@ func (a *APIError) Unwrap() error { // Error returns a readable representation of the APIError. func (a *APIError) Error() string { var msg string - if a.status != nil { - msg = a.err.Error() - } else if a.httpErr != nil { + if a.httpErr != nil { // Truncate the googleapi.Error message because it dumps the Details in // an ugly way. msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) + } else if a.status != nil { + msg = a.err.Error() } return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) } @@ -208,30 +234,53 @@ func (a *APIError) Metadata() map[string]string { } -// FromError parses a Status error or a googleapi.Error and builds an APIError. -func FromError(err error) (*APIError, bool) { - if err == nil { - return nil, false - } - - ae := APIError{err: err} +// setDetailsFromError parses a Status error or a googleapi.Error +// and sets status and details or httpErr and details, respectively. +// It returns false if neither Status nor googleapi.Error can be parsed. +// When err is a googleapi.Error, the status of the returned error will +// be set to an Unknown error, rather than nil, since a nil code is +// interpreted as OK in the gRPC status package. 
+func (a *APIError) setDetailsFromError(err error) bool { st, isStatus := status.FromError(err) var herr *googleapi.Error isHTTPErr := errors.As(err, &herr) switch { case isStatus: - ae.status = st - ae.details = parseDetails(st.Details()) + a.status = st + a.details = parseDetails(st.Details()) case isHTTPErr: - ae.httpErr = herr - ae.details = parseHTTPDetails(herr) + a.httpErr = herr + a.details = parseHTTPDetails(herr) + a.status = status.New(codes.Unknown, herr.Message) default: - return nil, false + return false } + return true +} - return &ae, true +// FromError parses a Status error or a googleapi.Error and builds an +// APIError, wrapping the provided error in the new APIError. It +// returns false if neither Status nor googleapi.Error can be parsed. +func FromError(err error) (*APIError, bool) { + return ParseError(err, true) +} +// ParseError parses a Status error or a googleapi.Error and builds an +// APIError. If wrap is true, it wraps the error in the new APIError. +// It returns false if neither Status nor googleapi.Error can be parsed. +func ParseError(err error, wrap bool) (*APIError, bool) { + if err == nil { + return nil, false + } + ae := APIError{} + if wrap { + ae = APIError{err: err} + } + if !ae.setDetailsFromError(err) { + return nil, false + } + return &ae, true } // parseDetails accepts a slice of interface{} that should be backed by some diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go new file mode 100644 index 000000000..e4b03f161 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go @@ -0,0 +1,256 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.17.3 +// source: custom_error.proto + +package jsonerror + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Error code for `CustomError`. +type CustomError_CustomErrorCode int32 + +const ( + // Default error. + CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0 + // Too many foo. + CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1 + // Not enough foo. + CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2 + // Catastrophic error. + CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3 +) + +// Enum value maps for CustomError_CustomErrorCode. 
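For context on the apierror changes above, here is an illustrative sketch of consuming ParseError; wrap=false builds the APIError without retaining the original error, and FromError(err) is equivalent to ParseError(err, true). Metadata appears in the hunk above; GRPCStatus and Reason are assumed existing APIError accessors from the upstream package.

package main

import (
	"log"

	"github.com/googleapis/gax-go/v2/apierror"
)

func inspect(err error) {
	if ae, ok := apierror.ParseError(err, false); ok {
		// HTTP-sourced errors now carry an Unknown gRPC status instead of nil.
		log.Printf("code=%v reason=%q metadata=%v",
			ae.GRPCStatus().Code(), ae.Reason(), ae.Metadata())
	}
}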
+var ( + CustomError_CustomErrorCode_name = map[int32]string{ + 0: "CUSTOM_ERROR_CODE_UNSPECIFIED", + 1: "TOO_MANY_FOO", + 2: "NOT_ENOUGH_FOO", + 3: "UNIVERSE_WAS_DESTROYED", + } + CustomError_CustomErrorCode_value = map[string]int32{ + "CUSTOM_ERROR_CODE_UNSPECIFIED": 0, + "TOO_MANY_FOO": 1, + "NOT_ENOUGH_FOO": 2, + "UNIVERSE_WAS_DESTROYED": 3, + } +) + +func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode { + p := new(CustomError_CustomErrorCode) + *p = x + return p +} + +func (x CustomError_CustomErrorCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor { + return file_custom_error_proto_enumTypes[0].Descriptor() +} + +func (CustomError_CustomErrorCode) Type() protoreflect.EnumType { + return &file_custom_error_proto_enumTypes[0] +} + +func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead. +func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0, 0} +} + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +type CustomError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error code specific to the custom API being invoked. + Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"` + // Name of the failed entity. + Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` + // Message that describes the error. + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *CustomError) Reset() { + *x = CustomError{} + if protoimpl.UnsafeEnabled { + mi := &file_custom_error_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomError) ProtoMessage() {} + +func (x *CustomError) ProtoReflect() protoreflect.Message { + mi := &file_custom_error_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomError.ProtoReflect.Descriptor instead. 
+func (*CustomError) Descriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0} +} + +func (x *CustomError) GetCode() CustomError_CustomErrorCode { + if x != nil { + return x.Code + } + return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED +} + +func (x *CustomError) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *CustomError) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_custom_error_proto protoreflect.FileDescriptor + +var file_custom_error_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x0b, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x76, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x4f, 0x4f, 0x5f, 0x4d, 0x41, + 0x4e, 0x59, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x54, 0x5f, + 0x45, 0x4e, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, + 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x53, 0x5f, 0x44, 0x45, 0x53, + 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_custom_error_proto_rawDescOnce sync.Once + file_custom_error_proto_rawDescData = file_custom_error_proto_rawDesc +) + +func file_custom_error_proto_rawDescGZIP() []byte { + file_custom_error_proto_rawDescOnce.Do(func() { + file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_custom_error_proto_rawDescData) + }) + return file_custom_error_proto_rawDescData +} + +var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_custom_error_proto_goTypes = []interface{}{ + (CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode + (*CustomError)(nil), // 1: error.CustomError +} +var file_custom_error_proto_depIdxs = []int32{ + 0, // 0: 
error.CustomError.code:type_name -> error.CustomError.CustomErrorCode + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_custom_error_proto_init() } +func file_custom_error_proto_init() { + if File_custom_error_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_custom_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_custom_error_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_custom_error_proto_goTypes, + DependencyIndexes: file_custom_error_proto_depIdxs, + EnumInfos: file_custom_error_proto_enumTypes, + MessageInfos: file_custom_error_proto_msgTypes, + }.Build() + File_custom_error_proto = out.File + file_custom_error_proto_rawDesc = nil + file_custom_error_proto_goTypes = nil + file_custom_error_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto new file mode 100644 index 000000000..21678ae65 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto @@ -0,0 +1,50 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package error; + +option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; + + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +message CustomError { + + // Error code for `CustomError`. + enum CustomErrorCode { + // Default error. + CUSTOM_ERROR_CODE_UNSPECIFIED = 0; + + // Too many foo. + TOO_MANY_FOO = 1; + + // Not enough foo. + NOT_ENOUGH_FOO = 2; + + // Catastrophic error. + UNIVERSE_WAS_DESTROYED = 3; + + } + + // Error code specific to the custom API being invoked. + CustomErrorCode code = 1; + + // Name of the failed entity. + string entity = 2; + + // Message that describes the error. + string error_message = 3; +} diff --git a/vendor/github.com/googleapis/gax-go/v2/content_type.go b/vendor/github.com/googleapis/gax-go/v2/content_type.go new file mode 100644 index 000000000..1b53d0a3a --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/content_type.go @@ -0,0 +1,112 @@ +// Copyright 2022, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "io" + "io/ioutil" + "net/http" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. + err error // set to any error encountered while reading bytes to be sniffed. + + ctype string // set on first sniff. + sniffed bool // set to true on first sniff. +} + +func (cs *contentSniffer) Read(p []byte) (n int, err error) { + // Ensure that the content type is sniffed before any data is consumed from Reader. + _, _ = cs.ContentType() + + if len(cs.start) > 0 { + n := copy(p, cs.start) + cs.start = cs.start[n:] + return n, nil + } + + // We may have read some bytes into start while sniffing, even if the read ended in an error. + // We should first return those bytes, then the error. + if cs.err != nil { + return 0, cs.err + } + + // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. + return cs.r.Read(p) +} + +// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. +func (cs *contentSniffer) ContentType() (string, bool) { + if cs.sniffed { + return cs.ctype, cs.ctype != "" + } + cs.sniffed = true + // If ReadAll hits EOF, it returns err==nil. + cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) + + // Don't try to detect the content type based on possibly incomplete data. + if cs.err != nil { + return "", false + } + + cs.ctype = http.DetectContentType(cs.start) + return cs.ctype, true +} + +// DetermineContentType determines the content type of the supplied reader. +// The content of media will be sniffed to determine the content type. +// After calling DetectContentType the caller must not perform further reads on +// media, but rather read from the Reader that is returned. 
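As a usage sketch of the contract described in this comment (after sniffing, read from the returned reader rather than the original), assuming a plain net/http upload; the request wiring is illustrative only, and only gax.DetermineContentType comes from the new file.

package main

import (
	"net/http"
	"os"

	"github.com/googleapis/gax-go/v2"
)

func upload(url, path string) (*http.Response, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// The sniffer may buffer up to 512 bytes, so all reads must go through body.
	body, ctype := gax.DetermineContentType(f)
	req, err := http.NewRequest(http.MethodPost, url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", ctype)
	return http.DefaultClient.Do(req)
}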
+func DetermineContentType(media io.Reader) (io.Reader, string) { + // For backwards compatibility, allow clients to set content + // type by providing a ContentTyper for media. + // Note: This is an anonymous interface definition copied from googleapi.ContentTyper. + if typer, ok := media.(interface { + ContentType() string + }); ok { + return media, typer.ContentType() + } + + sniffer := newContentSniffer(media) + if ctype, ok := sniffer.ContentType(); ok { + return sniffer, ctype + } + // If content type could not be sniffed, reads from sniffer will eventually fail with an error. + return sniffer, "" +} diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index bf272a504..936873ec4 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.4.0" +const Version = "2.7.1" diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 2f4930d9d..35904ea19 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: metrics.proto +// source: io/prometheus/client/metrics.proto package io_prometheus_client @@ -24,11 +24,18 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type MetricType int32 const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 - MetricType_SUMMARY MetricType = 2 - MetricType_UNTYPED MetricType = 3 + // COUNTER must use the Metric field "counter". + MetricType_COUNTER MetricType = 0 + // GAUGE must use the Metric field "gauge". + MetricType_GAUGE MetricType = 1 + // SUMMARY must use the Metric field "summary". + MetricType_SUMMARY MetricType = 2 + // UNTYPED must use the Metric field "untyped". + MetricType_UNTYPED MetricType = 3 + // HISTOGRAM must use the Metric field "histogram". MetricType_HISTOGRAM MetricType = 4 + // GAUGE_HISTOGRAM must use the Metric field "histogram". 
+ MetricType_GAUGE_HISTOGRAM MetricType = 5 ) var MetricType_name = map[int32]string{ @@ -37,14 +44,16 @@ var MetricType_name = map[int32]string{ 2: "SUMMARY", 3: "UNTYPED", 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", } var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, } func (x MetricType) Enum() *MetricType { @@ -67,7 +76,7 @@ func (x *MetricType) UnmarshalJSON(data []byte) error { } func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} + return fileDescriptor_d1e5ddb18987a258, []int{0} } type LabelPair struct { @@ -82,7 +91,7 @@ func (m *LabelPair) Reset() { *m = LabelPair{} } func (m *LabelPair) String() string { return proto.CompactTextString(m) } func (*LabelPair) ProtoMessage() {} func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} + return fileDescriptor_d1e5ddb18987a258, []int{0} } func (m *LabelPair) XXX_Unmarshal(b []byte) error { @@ -128,7 +137,7 @@ func (m *Gauge) Reset() { *m = Gauge{} } func (m *Gauge) String() string { return proto.CompactTextString(m) } func (*Gauge) ProtoMessage() {} func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{1} + return fileDescriptor_d1e5ddb18987a258, []int{1} } func (m *Gauge) XXX_Unmarshal(b []byte) error { @@ -168,7 +177,7 @@ func (m *Counter) Reset() { *m = Counter{} } func (m *Counter) String() string { return proto.CompactTextString(m) } func (*Counter) ProtoMessage() {} func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{2} + return fileDescriptor_d1e5ddb18987a258, []int{2} } func (m *Counter) XXX_Unmarshal(b []byte) error { @@ -215,7 +224,7 @@ func (m *Quantile) Reset() { *m = Quantile{} } func (m *Quantile) String() string { return proto.CompactTextString(m) } func (*Quantile) ProtoMessage() {} func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{3} + return fileDescriptor_d1e5ddb18987a258, []int{3} } func (m *Quantile) XXX_Unmarshal(b []byte) error { @@ -263,7 +272,7 @@ func (m *Summary) Reset() { *m = Summary{} } func (m *Summary) String() string { return proto.CompactTextString(m) } func (*Summary) ProtoMessage() {} func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{4} + return fileDescriptor_d1e5ddb18987a258, []int{4} } func (m *Summary) XXX_Unmarshal(b []byte) error { @@ -316,7 +325,7 @@ func (m *Untyped) Reset() { *m = Untyped{} } func (m *Untyped) String() string { return proto.CompactTextString(m) } func (*Untyped) ProtoMessage() {} func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{5} + return fileDescriptor_d1e5ddb18987a258, []int{5} } func (m *Untyped) XXX_Unmarshal(b []byte) error { @@ -345,9 +354,34 @@ func (m *Untyped) GetValue() float64 { } type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleCountFloat *float64 
`protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + // Buckets for the conventional histogram. + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). + // In the future, more bucket schemas may be added using numbers < -4 or > 8. + Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` + ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` + ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` + ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` + // Negative buckets for the native histogram. + NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` + // Use either "negative_delta" or "negative_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` + NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` + // Positive buckets for the native histogram. + PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` + // Use either "positive_delta" or "positive_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. 
+ PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` + PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -357,7 +391,7 @@ func (m *Histogram) Reset() { *m = Histogram{} } func (m *Histogram) String() string { return proto.CompactTextString(m) } func (*Histogram) ProtoMessage() {} func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{6} + return fileDescriptor_d1e5ddb18987a258, []int{6} } func (m *Histogram) XXX_Unmarshal(b []byte) error { @@ -385,6 +419,13 @@ func (m *Histogram) GetSampleCount() uint64 { return 0 } +func (m *Histogram) GetSampleCountFloat() float64 { + if m != nil && m.SampleCountFloat != nil { + return *m.SampleCountFloat + } + return 0 +} + func (m *Histogram) GetSampleSum() float64 { if m != nil && m.SampleSum != nil { return *m.SampleSum @@ -399,8 +440,81 @@ func (m *Histogram) GetBucket() []*Bucket { return nil } +func (m *Histogram) GetSchema() int32 { + if m != nil && m.Schema != nil { + return *m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil && m.ZeroThreshold != nil { + return *m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCount() uint64 { + if m != nil && m.ZeroCount != nil { + return *m.ZeroCount + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if m != nil && m.ZeroCountFloat != nil { + return *m.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpan() []*BucketSpan { + if m != nil { + return m.NegativeSpan + } + return nil +} + +func (m *Histogram) GetNegativeDelta() []int64 { + if m != nil { + return m.NegativeDelta + } + return nil +} + +func (m *Histogram) GetNegativeCount() []float64 { + if m != nil { + return m.NegativeCount + } + return nil +} + +func (m *Histogram) GetPositiveSpan() []*BucketSpan { + if m != nil { + return m.PositiveSpan + } + return nil +} + +func (m *Histogram) GetPositiveDelta() []int64 { + if m != nil { + return m.PositiveDelta + } + return nil +} + +func (m *Histogram) GetPositiveCount() []float64 { + if m != nil { + return m.PositiveCount + } + return nil +} + +// A Bucket of a conventional histogram, each of which is treated as +// an individual counter-like time series by Prometheus. 
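The new fields above extend Histogram with native (sparse) histogram data alongside the classic bucket representation. A small, illustrative consumer follows, assuming the conventional dto import alias; treating populated spans as the native case is an assumption, while the getters themselves come from this file.

package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
)

func describeHistogram(m *dto.Metric) {
	h := m.GetHistogram()
	if h == nil {
		return
	}
	// Spans and deltas are only populated for native histograms.
	if len(h.GetPositiveSpan()) > 0 || len(h.GetNegativeSpan()) > 0 {
		fmt.Printf("native: schema=%d zero_threshold=%g zero_count=%d\n",
			h.GetSchema(), h.GetZeroThreshold(), h.GetZeroCount())
		return
	}
	fmt.Printf("classic: %d buckets, count=%d sum=%g\n",
		len(h.GetBucket()), h.GetSampleCount(), h.GetSampleSum())
}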
type Bucket struct { CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -412,7 +526,7 @@ func (m *Bucket) Reset() { *m = Bucket{} } func (m *Bucket) String() string { return proto.CompactTextString(m) } func (*Bucket) ProtoMessage() {} func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{7} + return fileDescriptor_d1e5ddb18987a258, []int{7} } func (m *Bucket) XXX_Unmarshal(b []byte) error { @@ -440,6 +554,13 @@ func (m *Bucket) GetCumulativeCount() uint64 { return 0 } +func (m *Bucket) GetCumulativeCountFloat() float64 { + if m != nil && m.CumulativeCountFloat != nil { + return *m.CumulativeCountFloat + } + return 0 +} + func (m *Bucket) GetUpperBound() float64 { if m != nil && m.UpperBound != nil { return *m.UpperBound @@ -454,6 +575,59 @@ func (m *Bucket) GetExemplar() *Exemplar { return nil } +// A BucketSpan defines a number of consecutive buckets in a native +// histogram with their offset. Logically, it would be more +// straightforward to include the bucket counts in the Span. However, +// the protobuf representation is more compact in the way the data is +// structured here (with all the buckets in a single array separate +// from the Spans). +type BucketSpan struct { + Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` + Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{8} +} + +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketSpan.Unmarshal(m, b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return xxx_messageInfo_BucketSpan.Size(m) +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil && m.Length != nil { + return *m.Length + } + return 0 +} + type Exemplar struct { Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` @@ -467,7 +641,7 @@ func (m *Exemplar) Reset() { *m = Exemplar{} } func (m *Exemplar) String() string { return proto.CompactTextString(m) } func (*Exemplar) ProtoMessage() {} func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{8} + return 
fileDescriptor_d1e5ddb18987a258, []int{9} } func (m *Exemplar) XXX_Unmarshal(b []byte) error { @@ -526,7 +700,7 @@ func (m *Metric) Reset() { *m = Metric{} } func (m *Metric) String() string { return proto.CompactTextString(m) } func (*Metric) ProtoMessage() {} func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{9} + return fileDescriptor_d1e5ddb18987a258, []int{10} } func (m *Metric) XXX_Unmarshal(b []byte) error { @@ -610,7 +784,7 @@ func (m *MetricFamily) Reset() { *m = MetricFamily{} } func (m *MetricFamily) String() string { return proto.CompactTextString(m) } func (*MetricFamily) ProtoMessage() {} func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{10} + return fileDescriptor_d1e5ddb18987a258, []int{11} } func (m *MetricFamily) XXX_Unmarshal(b []byte) error { @@ -669,55 +843,72 @@ func init() { proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") } -func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } - -var fileDescriptor_6039342a2ba47b72 = []byte{ - // 665 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, - 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, - 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, - 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, - 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, - 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, - 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, - 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, - 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, - 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, - 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, - 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, - 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, - 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, - 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, - 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, - 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, - 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, - 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, - 0x58, 
0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, - 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, - 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, - 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, - 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, - 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, - 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, - 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, - 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, - 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, - 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, - 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, - 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, - 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, - 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, - 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, - 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, - 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, - 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, - 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, - 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, +func init() { + proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) +} + +var fileDescriptor_d1e5ddb18987a258 = []byte{ + // 896 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48, + 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92, + 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0, + 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b, + 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3, + 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90, + 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25, + 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb, + 0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19, + 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba, + 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b, + 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 
0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c, + 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0, + 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5, + 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd, + 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d, + 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b, + 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b, + 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f, + 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b, + 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8, + 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e, + 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79, + 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29, + 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48, + 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca, + 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9, + 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5, + 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe, + 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55, + 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e, + 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83, + 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65, + 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a, + 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8, + 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf, + 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1, + 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97, + 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc, + 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7, + 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b, + 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58, + 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b, + 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8, + 0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f, + 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67, + 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28, + 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 
0xee, 0x27, + 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41, + 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f, + 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f, + 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac, + 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a, + 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab, + 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00, } diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml deleted file mode 100644 index e944f5947..000000000 --- a/vendor/github.com/spf13/afero/.travis.yml +++ /dev/null @@ -1,26 +0,0 @@ -sudo: false -language: go -arch: - - amd64 - - ppc64e - -go: - - "1.14" - - "1.15" - - "1.16" - - tip - -os: - - linux - - osx - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - go build -v ./... - - go test -count=1 -cover -race -v ./... - - go vet ./... - - FILES=$(gofmt -s -l . zipfs sftpfs mem tarfs); if [[ -n "${FILES}" ]]; then echo "You have go format errors; gofmt your changes"; exit 1; fi diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md index cab257f56..3bafbfdfc 100644 --- a/vendor/github.com/spf13/afero/README.md +++ b/vendor/github.com/spf13/afero/README.md @@ -2,7 +2,7 @@ A FileSystem Abstraction System for Go -[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Test](https://github.com/spf13/afero/actions/workflows/test.yml/badge.svg)](https://github.com/spf13/afero/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) # Overview diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go index 469ff7d2d..199480cd0 100644 --- a/vendor/github.com/spf13/afero/afero.go +++ b/vendor/github.com/spf13/afero/afero.go @@ -103,8 +103,8 @@ type Fs interface { var ( ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") + ErrOutOfRange = errors.New("out of range") + ErrTooLarge = errors.New("too large") ErrFileNotFound = os.ErrNotExist ErrFileExists = os.ErrExist ErrDestinationExists = os.ErrExist diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml index 5d2f34bf1..65e20e8ca 100644 --- a/vendor/github.com/spf13/afero/appveyor.yml +++ b/vendor/github.com/spf13/afero/appveyor.yml @@ -1,3 +1,5 @@ +# This currently does nothing. 
We have moved to GitHub action, but this is kept +# until spf13 has disabled this project in AppVeyor. version: '{build}' clone_folder: C:\gopath\src\github.com\spf13\afero environment: @@ -6,10 +8,3 @@ build_script: - cmd: >- go version - go env - - go get -v github.com/spf13/afero/... - - go build -v github.com/spf13/afero/... -test_script: -- cmd: go test -count=1 -cover -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go index 4f9832829..70a1d9168 100644 --- a/vendor/github.com/spf13/afero/basepath.go +++ b/vendor/github.com/spf13/afero/basepath.go @@ -1,6 +1,7 @@ package afero import ( + "io/fs" "os" "path/filepath" "runtime" @@ -8,7 +9,10 @@ import ( "time" ) -var _ Lstater = (*BasePathFs)(nil) +var ( + _ Lstater = (*BasePathFs)(nil) + _ fs.ReadDirFile = (*BasePathFile)(nil) +) // The BasePathFs restricts all operations to a given path within an Fs. // The given file name to the operations on this Fs will be prepended with @@ -33,6 +37,14 @@ func (f *BasePathFile) Name() string { return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) } +func (f *BasePathFile) ReadDir(n int) ([]fs.DirEntry, error) { + if rdf, ok := f.File.(fs.ReadDirFile); ok { + return rdf.ReadDir(n) + + } + return readDirFile{f.File}.ReadDir(n) +} + func NewBasePathFs(source Fs, path string) Fs { return &BasePathFs{source: source, path: path} } diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go index 18b45824b..eed0f225f 100644 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly // +build aix darwin openbsd freebsd netbsd dragonfly package afero diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go index 2b850e4dd..004d57e2f 100644 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -10,12 +10,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// +build !darwin -// +build !openbsd -// +build !freebsd -// +build !dragonfly -// +build !netbsd -// +build !aix +//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix +// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix package afero diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go index 2b86e30d1..ac0de6d51 100644 --- a/vendor/github.com/spf13/afero/httpFs.go +++ b/vendor/github.com/spf13/afero/httpFs.go @@ -29,7 +29,7 @@ type httpDir struct { } func (d httpDir) Open(name string) (http.File, error) { - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) || strings.Contains(name, "\x00") { return nil, errors.New("http: invalid character in file path") } diff --git a/vendor/github.com/spf13/afero/internal/common/adapters.go b/vendor/github.com/spf13/afero/internal/common/adapters.go new file mode 100644 index 000000000..60685caa5 --- /dev/null +++ b/vendor/github.com/spf13/afero/internal/common/adapters.go @@ -0,0 +1,27 @@ +// Copyright © 2022 Steve Francia . 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +import "io/fs" + +// FileInfoDirEntry provides an adapter from os.FileInfo to fs.DirEntry +type FileInfoDirEntry struct { + fs.FileInfo +} + +var _ fs.DirEntry = FileInfoDirEntry{} + +func (d FileInfoDirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() } + +func (d FileInfoDirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil } diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go index c80345536..938b9316e 100644 --- a/vendor/github.com/spf13/afero/iofs.go +++ b/vendor/github.com/spf13/afero/iofs.go @@ -1,3 +1,4 @@ +//go:build go1.16 // +build go1.16 package afero @@ -7,7 +8,10 @@ import ( "io/fs" "os" "path" + "sort" "time" + + "github.com/spf13/afero/internal/common" ) // IOFS adopts afero.Fs to stdlib io/fs.FS @@ -66,14 +70,31 @@ func (iofs IOFS) Glob(pattern string) ([]string, error) { } func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) { - items, err := ReadDir(iofs.Fs, name) + f, err := iofs.Fs.Open(name) if err != nil { return nil, iofs.wrapError("readdir", name, err) } + defer f.Close() + + if rdf, ok := f.(fs.ReadDirFile); ok { + items, err := rdf.ReadDir(-1) + if err != nil { + return nil, iofs.wrapError("readdir", name, err) + } + sort.Slice(items, func(i, j int) bool { return items[i].Name() < items[j].Name() }) + return items, nil + } + + items, err := f.Readdir(-1) + if err != nil { + return nil, iofs.wrapError("readdir", name, err) + } + sort.Sort(byName(items)) + ret := make([]fs.DirEntry, len(items)) for i := range items { - ret[i] = dirEntry{items[i]} + ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} } return ret, nil @@ -108,17 +129,6 @@ func (IOFS) wrapError(op, path string, err error) error { } } -// dirEntry provides adapter from os.FileInfo to fs.DirEntry -type dirEntry struct { - fs.FileInfo -} - -var _ fs.DirEntry = dirEntry{} - -func (d dirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() } - -func (d dirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil } - // readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open type readDirFile struct { File @@ -134,7 +144,7 @@ func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { ret := make([]fs.DirEntry, len(items)) for i := range items { - ret[i] = dirEntry{items[i]} + ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} } return ret, nil diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go index a403133e2..386c9cdc2 100644 --- a/vendor/github.com/spf13/afero/ioutil.go +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -141,7 +141,7 @@ func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { // We generate random temporary file names so that there's a good // chance the file doesn't exist yet - keeps the number of tries in // TempFile to a minimum. 
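The afero hunks around this point (internal/common/adapters.go, iofs.go, basepath.go, and mem/file.go further down) wire afero files into the standard io/fs interfaces: FileInfoDirEntry adapts an os.FileInfo to fs.DirEntry, and IOFS.ReadDir now uses a file's own fs.ReadDirFile implementation when available and returns entries sorted by name. A minimal sketch of how this surfaces to callers, using only exported afero functions (NewMemMapFs, WriteFile, NewIOFS); the paths are invented for illustration:

package main

import (
	"fmt"
	"io/fs"

	"github.com/spf13/afero"
)

func main() {
	memFs := afero.NewMemMapFs()
	_ = afero.WriteFile(memFs, "dir/a.txt", []byte("a"), 0o644)
	_ = afero.WriteFile(memFs, "dir/b.txt", []byte("b"), 0o644)

	// NewIOFS adapts an afero.Fs to io/fs.FS. With the hunks above,
	// ReadDir prefers the file's own fs.ReadDirFile implementation and
	// returns the entries sorted by name.
	entries, err := fs.ReadDir(afero.NewIOFS(memFs), "dir")
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name(), e.IsDir())
	}
}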
-var rand uint32 +var randNum uint32 var randmu sync.Mutex func reseed() uint32 { @@ -150,12 +150,12 @@ func reseed() uint32 { func nextRandom() string { randmu.Lock() - r := rand + r := randNum if r == 0 { r = reseed() } r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r + randNum = r randmu.Unlock() return strconv.Itoa(int(1e9 + r%1e9))[1:] } @@ -194,7 +194,7 @@ func TempFile(fs Fs, dir, pattern string) (f File, err error) { if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() - rand = reseed() + randNum = reseed() randmu.Unlock() } continue @@ -226,7 +226,7 @@ func TempDir(fs Fs, dir, prefix string) (name string, err error) { if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() - rand = reseed() + randNum = reseed() randmu.Unlock() } continue diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go index 5ef8b6a39..3cf4693b5 100644 --- a/vendor/github.com/spf13/afero/mem/file.go +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -18,15 +18,20 @@ import ( "bytes" "errors" "io" + "io/fs" "os" "path/filepath" "sync" "sync/atomic" "time" + + "github.com/spf13/afero/internal/common" ) const FilePathSeparator = string(filepath.Separator) +var _ fs.ReadDirFile = &File{} + type File struct { // atomic requires 64-bit alignment for struct field access at int64 @@ -183,10 +188,23 @@ func (f *File) Readdirnames(n int) (names []string, err error) { return names, err } +// Implements fs.ReadDirFile +func (f *File) ReadDir(n int) ([]fs.DirEntry, error) { + fi, err := f.Readdir(n) + if err != nil { + return nil, err + } + di := make([]fs.DirEntry, len(fi)) + for i, f := range fi { + di[i] = common.FileInfoDirEntry{FileInfo: f} + } + return di, nil +} + func (f *File) Read(b []byte) (n int, err error) { f.fileData.Lock() defer f.fileData.Unlock() - if f.closed == true { + if f.closed { return 0, ErrFileClosed } if len(b) > 0 && int(f.at) == len(f.fileData.data) { @@ -214,7 +232,7 @@ func (f *File) ReadAt(b []byte, off int64) (n int, err error) { } func (f *File) Truncate(size int64) error { - if f.closed == true { + if f.closed { return ErrFileClosed } if f.readOnly { @@ -236,7 +254,7 @@ func (f *File) Truncate(size int64) error { } func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.closed == true { + if f.closed { return 0, ErrFileClosed } switch whence { @@ -251,7 +269,7 @@ func (f *File) Seek(offset int64, whence int) (int64, error) { } func (f *File) Write(b []byte) (n int, err error) { - if f.closed == true { + if f.closed { return 0, ErrFileClosed } if f.readOnly { @@ -330,8 +348,8 @@ func (s *FileInfo) Size() int64 { var ( ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") + ErrOutOfRange = errors.New("out of range") + ErrTooLarge = errors.New("too large") ErrFileNotFound = os.ErrNotExist ErrFileExists = os.ErrExist ErrDestinationExists = os.ErrExist diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go index 34f99a40c..333d367f4 100644 --- a/vendor/github.com/spf13/afero/unionFile.go +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -65,7 +65,7 @@ func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { if f.Layer != nil { n, err := f.Layer.ReadAt(s, o) if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) + _, err = f.Base.Seek(o+int64(n), io.SeekStart) } return n, err } diff --git 
a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go index 4f253f481..cb7de23f2 100644 --- a/vendor/github.com/spf13/afero/util.go +++ b/vendor/github.com/spf13/afero/util.go @@ -25,6 +25,7 @@ import ( "strings" "unicode" + "golang.org/x/text/runes" "golang.org/x/text/transform" "golang.org/x/text/unicode/norm" ) @@ -158,16 +159,12 @@ func UnicodeSanitize(s string) string { // Transform characters with accents into plain forms. func NeuterAccents(s string) string { - t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) + t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC) result, _, _ := transform.String(t, string(s)) return result } -func isMn(r rune) bool { - return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks -} - func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { return FileContainsBytes(a.Fs, filename, subslice) } @@ -299,6 +296,9 @@ func IsEmpty(fs Fs, path string) (bool, error) { } defer f.Close() list, err := f.Readdir(-1) + if err != nil { + return false, err + } return len(list) == 0, nil } return fi.Size() == 0, nil diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml index a749ac549..7746f516d 100644 --- a/vendor/github.com/stretchr/objx/Taskfile.yml +++ b/vendor/github.com/stretchr/objx/Taskfile.yml @@ -25,6 +25,6 @@ tasks: - go test -race ./... test-coverage: - desc: Runs go tests and calucates test coverage + desc: Runs go tests and calculates test coverage cmds: - go test -race -coverprofile=c.out ./... diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 95d8e59da..b774da88d 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -352,9 +352,9 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { // Greater asserts that the first element is greater than the second // -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -364,10 +364,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // GreaterOrEqual asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -377,9 +377,9 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // Less asserts that the first element is less than the second // -// assert.Less(t, 1, 2) -// assert.Less(t, float64(1), float64(2)) -// assert.Less(t, "a", "b") +// assert.Less(t, 1, 2) +// assert.Less(t, float64(1), float64(2)) +// assert.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ 
-389,10 +389,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // LessOrEqual asserts that the first element is less than or equal to the second // -// assert.LessOrEqual(t, 1, 2) -// assert.LessOrEqual(t, 2, 2) -// assert.LessOrEqual(t, "a", "b") -// assert.LessOrEqual(t, "b", "b") +// assert.LessOrEqual(t, 1, 2) +// assert.LessOrEqual(t, 2, 2) +// assert.LessOrEqual(t, "a", "b") +// assert.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -402,8 +402,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // Positive asserts that the specified element is positive // -// assert.Positive(t, 1) -// assert.Positive(t, 1.23) +// assert.Positive(t, 1) +// assert.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -414,8 +414,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { // Negative asserts that the specified element is negative // -// assert.Negative(t, -1) -// assert.Negative(t, -1.23) +// assert.Negative(t, -1) +// assert.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 7880b8f94..84dbd6c79 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -22,9 +22,9 @@ func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bo // Containsf asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -56,7 +56,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Emptyf(t, obj, "error message %s", "formatted") +// assert.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -66,7 +66,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) boo // Equalf asserts that two objects are equal. // -// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// assert.Equalf(t, 123, 123, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). 
Function equality @@ -81,8 +81,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // EqualErrorf asserts that a function returned an error (i.e. not `nil`) // and that it is equal to the provided error. // -// actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +// actualObj, err := SomeFunction() +// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -90,10 +90,27 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) } +// EqualExportedValuesf asserts that the types of two objects are equal and their public +// fields are also equal. This is useful for comparing structs that have private fields +// that could potentially differ. +// +// type S struct { +// Exported int +// notExported int +// } +// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true +// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false +func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) +} + // EqualValuesf asserts that two objects are equal or convertable to the same types // and equal. // -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") +// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -103,10 +120,10 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// if assert.Errorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -126,8 +143,8 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int // ErrorContainsf asserts that a function returned an error (i.e. not `nil`) // and that the error contains the specified substring. // -// actualObj, err := SomeFunction() -// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -147,7 +164,7 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
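EqualExportedValuesf, added in the hunk above, compares only the exported fields of two struct values of the same type. A small sketch of a test using it; the Config type and test name are invented for illustration:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type Config struct {
	Host string
	Port int
	// unexported state that should not affect equality
	resolved bool
}

func TestConfigExportedFields(t *testing.T) {
	want := Config{Host: "localhost", Port: 8080, resolved: false}
	got := Config{Host: "localhost", Port: 8080, resolved: true}

	// Passes: only Host and Port are compared; the unexported
	// `resolved` field is ignored.
	assert.EqualExportedValuesf(t, want, got, "config mismatch for %s", "localhost")
}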
// -// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -155,9 +172,34 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) } +// EventuallyWithTf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. In contrast to Eventually, +// it supplies a CollectT to the condition function, so that the condition +// function can use the CollectT to call other assertions. +// The condition is considered "met" if no errors are raised in a tick. +// The supplied CollectT collects all errors from one tick (if there are any). +// If the condition is not met before waitFor, the collected errors of +// the last tick are copied to t. +// +// externalValue := false +// go func() { +// time.Sleep(8*time.Second) +// externalValue = true +// }() +// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { +// // add assertions as needed; any assertion failure will fail the current tick +// assert.True(c, externalValue, "expected 'externalValue' to be true") +// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return EventuallyWithT(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) +} + // Exactlyf asserts that two objects are equal in value and type. // -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") +// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -183,7 +225,7 @@ func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{} // Falsef asserts that the specified value is false. 
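EventuallyWithTf, added just above (with the corresponding forwarder methods further down in assertion_forward.go), runs the condition with a *assert.CollectT on each tick; a tick counts as met only when no assertion against the collector fails, and on timeout the last tick's failures are copied to t. A small sketch of a test using it, with the polled counter invented for illustration:

package example_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestCounterEventuallyReachesTarget(t *testing.T) {
	var counter int64

	// Simulated background work that finishes well inside the wait window.
	go func() {
		for i := 0; i < 5; i++ {
			time.Sleep(10 * time.Millisecond)
			atomic.AddInt64(&counter, 1)
		}
	}()

	// Each tick re-runs the assertion against c; if the condition never
	// holds within waitFor, the failures from the last tick are copied to t.
	assert.EventuallyWithTf(t, func(c *assert.CollectT) {
		assert.GreaterOrEqual(c, atomic.LoadInt64(&counter), int64(5))
	}, time.Second, 10*time.Millisecond, "counter never reached %d", 5)
}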
// -// assert.Falsef(t, myBool, "error message %s", "formatted") +// assert.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -202,9 +244,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool // Greaterf asserts that the first element is greater than the second // -// assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") -// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") +// assert.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -214,10 +256,10 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in // GreaterOrEqualf asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -228,7 +270,7 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { @@ -241,7 +283,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { @@ -253,7 +295,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // HTTPErrorf asserts that a specified handler returns an error status code. 
// -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { @@ -265,7 +307,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPRedirectf asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { @@ -277,7 +319,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // HTTPStatusCodef asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool { @@ -289,7 +331,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st // HTTPSuccessf asserts that a specified handler returns a success status code. // -// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { @@ -301,7 +343,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // Implementsf asserts that an object is implemented by the specified interface. // -// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -311,7 +353,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // InDeltaf asserts that the two numerals are within delta of each other. 
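The HTTPErrorf/HTTPRedirectf/HTTPStatusCodef/HTTPSuccessf helpers above exercise an http.HandlerFunc in-process and report whether the assertion held. A short sketch of the status-code variant; the handler and route are invented for illustration:

package example_test

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotImplementedEndpoint(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNotImplemented)
	}

	// The handler is invoked directly with a synthesized GET request;
	// the assertion fails (with the formatted message) unless it answers 501.
	assert.HTTPStatusCodef(t, handler, "GET", "/notImplemented", nil,
		http.StatusNotImplemented, "unexpected status for %s", "/notImplemented")
}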
// -// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -353,9 +395,9 @@ func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsil // IsDecreasingf asserts that the collection is decreasing // -// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -365,9 +407,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface // IsIncreasingf asserts that the collection is increasing // -// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -377,9 +419,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface // IsNonDecreasingf asserts that the collection is not decreasing // -// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -389,9 +431,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf // IsNonIncreasingf asserts that the collection is not increasing // -// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -409,7 +451,7 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin // JSONEqf asserts 
that two JSON strings are equivalent. // -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -420,7 +462,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -430,9 +472,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // Lessf asserts that the first element is less than the second // -// assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") -// assert.Lessf(t, "a", "b", "error message %s", "formatted") +// assert.Lessf(t, 1, 2, "error message %s", "formatted") +// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +// assert.Lessf(t, "a", "b", "error message %s", "formatted") func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -442,10 +484,10 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter // LessOrEqualf asserts that the first element is less than or equal to the second // -// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") -// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -455,8 +497,8 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . // Negativef asserts that the specified element is negative // -// assert.Negativef(t, -1, "error message %s", "formatted") -// assert.Negativef(t, -1.23, "error message %s", "formatted") +// assert.Negativef(t, -1, "error message %s", "formatted") +// assert.Negativef(t, -1.23, "error message %s", "formatted") func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -467,7 +509,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. 
// -// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -477,7 +519,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time. // Nilf asserts that the specified object is nil. // -// assert.Nilf(t, err, "error message %s", "formatted") +// assert.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -496,10 +538,10 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool // NoErrorf asserts that a function returned no error (i.e. `nil`). // -// actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } +// actualObj, err := SomeFunction() +// if assert.NoErrorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -519,9 +561,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) boo // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -532,9 +574,9 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } +// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -544,7 +586,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // NotEqualf asserts that the specified values are NOT equal. // -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). 
@@ -557,7 +599,7 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, // NotEqualValuesf asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") +// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -576,7 +618,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf // NotNilf asserts that the specified object is not nil. // -// assert.NotNilf(t, err, "error message %s", "formatted") +// assert.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -586,7 +628,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bo // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -596,8 +638,8 @@ func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bo // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -607,7 +649,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // NotSamef asserts that two pointers do not reference the same object. // -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") +// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -621,7 +663,7 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, // NotSubsetf asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -639,7 +681,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { // Panicsf asserts that the code inside the specified PanicTestFunc panics. 
// -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -651,7 +693,7 @@ func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -662,7 +704,7 @@ func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -672,8 +714,8 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str // Positivef asserts that the specified element is positive // -// assert.Positivef(t, 1, "error message %s", "formatted") -// assert.Positivef(t, 1.23, "error message %s", "formatted") +// assert.Positivef(t, 1, "error message %s", "formatted") +// assert.Positivef(t, 1.23, "error message %s", "formatted") func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -683,8 +725,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool // Regexpf asserts that a specified regexp matches a string. // -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -694,7 +736,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // Samef asserts that two pointers reference the same object. // -// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -708,7 +750,7 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg // Subsetf asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). 
// -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -718,7 +760,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // Truef asserts that the specified value is true. // -// assert.Truef(t, myBool, "error message %s", "formatted") +// assert.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -728,7 +770,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { // WithinDurationf asserts that the two times are within duration delta of each other. // -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -738,7 +780,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim // WithinRangef asserts that a time is within a time range (inclusive). // -// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 339515b8b..b1d94aec5 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -30,9 +30,9 @@ func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{} // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// a.Contains("Hello World", "World") -// a.Contains(["Hello", "World"], "World") -// a.Contains({"Hello": "World"}, "Hello") +// a.Contains("Hello World", "World") +// a.Contains(["Hello", "World"], "World") +// a.Contains({"Hello": "World"}, "Hello") func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -43,9 +43,9 @@ func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs .. // Containsf asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. 
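The Subsetf/NotSubsetf doc comments above use bracketed pseudo-syntax for brevity; in actual Go code the list and subset arguments are slices, arrays, or maps. A small sketch:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSubsetExamples(t *testing.T) {
	// Passes: every element of the second slice appears in the first.
	assert.Subsetf(t, []int{1, 2, 3}, []int{1, 2}, "missing element in %v", []int{1, 2, 3})

	// Passes: 5 is not contained in the first slice.
	assert.NotSubsetf(t, []int{1, 3, 4}, []int{1, 5}, "unexpected subset of %v", []int{1, 3, 4})
}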
// -// a.Containsf("Hello World", "World", "error message %s", "formatted") -// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") -// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") +// a.Containsf("Hello World", "World", "error message %s", "formatted") +// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -98,7 +98,7 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// a.Empty(obj) +// a.Empty(obj) func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -109,7 +109,7 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// a.Emptyf(obj, "error message %s", "formatted") +// a.Emptyf(obj, "error message %s", "formatted") func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -119,7 +119,7 @@ func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) // Equal asserts that two objects are equal. // -// a.Equal(123, 123) +// a.Equal(123, 123) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -134,8 +134,8 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs // EqualError asserts that a function returned an error (i.e. not `nil`) // and that it is equal to the provided error. // -// actualObj, err := SomeFunction() -// a.EqualError(err, expectedErrorString) +// actualObj, err := SomeFunction() +// a.EqualError(err, expectedErrorString) func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -146,8 +146,8 @@ func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ... // EqualErrorf asserts that a function returned an error (i.e. not `nil`) // and that it is equal to the provided error. // -// actualObj, err := SomeFunction() -// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") +// actualObj, err := SomeFunction() +// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -155,10 +155,44 @@ func (a *Assertions) EqualErrorf(theError error, errString string, msg string, a return EqualErrorf(a.t, theError, errString, msg, args...) } +// EqualExportedValues asserts that the types of two objects are equal and their public +// fields are also equal. This is useful for comparing structs that have private fields +// that could potentially differ. 
+// +// type S struct { +// Exported int +// notExported int +// } +// a.EqualExportedValues(S{1, 2}, S{1, 3}) => true +// a.EqualExportedValues(S{1, 2}, S{2, 3}) => false +func (a *Assertions) EqualExportedValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualExportedValues(a.t, expected, actual, msgAndArgs...) +} + +// EqualExportedValuesf asserts that the types of two objects are equal and their public +// fields are also equal. This is useful for comparing structs that have private fields +// that could potentially differ. +// +// type S struct { +// Exported int +// notExported int +// } +// a.EqualExportedValuesf(S{1, 2}, S{1, 3}, "error message %s", "formatted") => true +// a.EqualExportedValuesf(S{1, 2}, S{2, 3}, "error message %s", "formatted") => false +func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualExportedValuesf(a.t, expected, actual, msg, args...) +} + // EqualValues asserts that two objects are equal or convertable to the same types // and equal. // -// a.EqualValues(uint32(123), int32(123)) +// a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -169,7 +203,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn // EqualValuesf asserts that two objects are equal or convertable to the same types // and equal. // -// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") +// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -179,7 +213,7 @@ func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg // Equalf asserts that two objects are equal. // -// a.Equalf(123, 123, "error message %s", "formatted") +// a.Equalf(123, 123, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -193,10 +227,10 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// if a.Error(err) { +// assert.Equal(t, expectedError, err) +// } func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -225,8 +259,8 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. // ErrorContains asserts that a function returned an error (i.e. not `nil`) // and that the error contains the specified substring. // -// actualObj, err := SomeFunction() -// a.ErrorContains(err, expectedErrorSubString) +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -237,8 +271,8 @@ func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs . 
// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) // and that the error contains the specified substring. // -// actualObj, err := SomeFunction() -// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -266,10 +300,10 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// if a.Errorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -280,7 +314,7 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { // Eventually asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) +// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -288,10 +322,60 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti return Eventually(a.t, condition, waitFor, tick, msgAndArgs...) } +// EventuallyWithT asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. In contrast to Eventually, +// it supplies a CollectT to the condition function, so that the condition +// function can use the CollectT to call other assertions. +// The condition is considered "met" if no errors are raised in a tick. +// The supplied CollectT collects all errors from one tick (if there are any). +// If the condition is not met before waitFor, the collected errors of +// the last tick are copied to t. +// +// externalValue := false +// go func() { +// time.Sleep(8*time.Second) +// externalValue = true +// }() +// a.EventuallyWithT(func(c *assert.CollectT) { +// // add assertions as needed; any assertion failure will fail the current tick +// assert.True(c, externalValue, "expected 'externalValue' to be true") +// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EventuallyWithT(a.t, condition, waitFor, tick, msgAndArgs...) +} + +// EventuallyWithTf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. In contrast to Eventually, +// it supplies a CollectT to the condition function, so that the condition +// function can use the CollectT to call other assertions. +// The condition is considered "met" if no errors are raised in a tick. +// The supplied CollectT collects all errors from one tick (if there are any). 
+// If the condition is not met before waitFor, the collected errors of +// the last tick are copied to t. +// +// externalValue := false +// go func() { +// time.Sleep(8*time.Second) +// externalValue = true +// }() +// a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { +// // add assertions as needed; any assertion failure will fail the current tick +// assert.True(c, externalValue, "expected 'externalValue' to be true") +// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EventuallyWithTf(a.t, condition, waitFor, tick, msg, args...) +} + // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -301,7 +385,7 @@ func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, t // Exactly asserts that two objects are equal in value and type. // -// a.Exactly(int32(123), int64(123)) +// a.Exactly(int32(123), int64(123)) func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -311,7 +395,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg // Exactlyf asserts that two objects are equal in value and type. // -// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") +// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -353,7 +437,7 @@ func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{ // False asserts that the specified value is false. // -// a.False(myBool) +// a.False(myBool) func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -363,7 +447,7 @@ func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { // Falsef asserts that the specified value is false. 
// -// a.Falsef(myBool, "error message %s", "formatted") +// a.Falsef(myBool, "error message %s", "formatted") func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -391,9 +475,9 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) b // Greater asserts that the first element is greater than the second // -// a.Greater(2, 1) -// a.Greater(float64(2), float64(1)) -// a.Greater("b", "a") +// a.Greater(2, 1) +// a.Greater(float64(2), float64(1)) +// a.Greater("b", "a") func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -403,10 +487,10 @@ func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...inter // GreaterOrEqual asserts that the first element is greater than or equal to the second // -// a.GreaterOrEqual(2, 1) -// a.GreaterOrEqual(2, 2) -// a.GreaterOrEqual("b", "a") -// a.GreaterOrEqual("b", "b") +// a.GreaterOrEqual(2, 1) +// a.GreaterOrEqual(2, 2) +// a.GreaterOrEqual("b", "a") +// a.GreaterOrEqual("b", "b") func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -416,10 +500,10 @@ func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs . // GreaterOrEqualf asserts that the first element is greater than or equal to the second // -// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") -// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") -// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") -// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") +// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -429,9 +513,9 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, // Greaterf asserts that the first element is greater than the second // -// a.Greaterf(2, 1, "error message %s", "formatted") -// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") -// a.Greaterf("b", "a", "error message %s", "formatted") +// a.Greaterf(2, 1, "error message %s", "formatted") +// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") +// a.Greaterf("b", "a", "error message %s", "formatted") func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -442,7 +526,7 @@ func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args . // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). 
func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { @@ -455,7 +539,7 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { @@ -468,7 +552,7 @@ func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { @@ -481,7 +565,7 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. // -// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { @@ -493,7 +577,7 @@ func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method strin // HTTPError asserts that a specified handler returns an error status code. // -// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { @@ -505,7 +589,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri // HTTPErrorf asserts that a specified handler returns an error status code. // -// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { @@ -517,7 +601,7 @@ func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url str // HTTPRedirect asserts that a specified handler returns a redirect status code. 
// -// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { @@ -529,7 +613,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s // HTTPRedirectf asserts that a specified handler returns a redirect status code. // -// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { @@ -541,7 +625,7 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url // HTTPStatusCode asserts that a specified handler returns a specified status code. // -// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) +// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { @@ -553,7 +637,7 @@ func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url // HTTPStatusCodef asserts that a specified handler returns a specified status code. // -// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool { @@ -565,7 +649,7 @@ func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, ur // HTTPSuccess asserts that a specified handler returns a success status code. // -// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { @@ -577,7 +661,7 @@ func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url st // HTTPSuccessf asserts that a specified handler returns a success status code. // -// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { @@ -589,7 +673,7 @@ func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url s // Implements asserts that an object is implemented by the specified interface. 
// -// a.Implements((*MyInterface)(nil), new(MyObject)) +// a.Implements((*MyInterface)(nil), new(MyObject)) func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -599,7 +683,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, // Implementsf asserts that an object is implemented by the specified interface. // -// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -609,7 +693,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{} // InDelta asserts that the two numerals are within delta of each other. // -// a.InDelta(math.Pi, 22/7.0, 0.01) +// a.InDelta(math.Pi, 22/7.0, 0.01) func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -651,7 +735,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del // InDeltaf asserts that the two numerals are within delta of each other. // -// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -693,9 +777,9 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo // IsDecreasing asserts that the collection is decreasing // -// a.IsDecreasing([]int{2, 1, 0}) -// a.IsDecreasing([]float{2, 1}) -// a.IsDecreasing([]string{"b", "a"}) +// a.IsDecreasing([]int{2, 1, 0}) +// a.IsDecreasing([]float{2, 1}) +// a.IsDecreasing([]string{"b", "a"}) func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -705,9 +789,9 @@ func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) // IsDecreasingf asserts that the collection is decreasing // -// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") -// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") -// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") +// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted") +// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted") func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -717,9 +801,9 @@ func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...inter // IsIncreasing asserts that the collection is increasing // -// a.IsIncreasing([]int{1, 2, 3}) -// a.IsIncreasing([]float{1, 2}) -// a.IsIncreasing([]string{"a", "b"}) +// a.IsIncreasing([]int{1, 2, 3}) +// a.IsIncreasing([]float{1, 2}) +// a.IsIncreasing([]string{"a", "b"}) func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -729,9 +813,9 @@ func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) // IsIncreasingf 
asserts that the collection is increasing // -// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") -// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") -// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") +// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted") +// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted") func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -741,9 +825,9 @@ func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...inter // IsNonDecreasing asserts that the collection is not decreasing // -// a.IsNonDecreasing([]int{1, 1, 2}) -// a.IsNonDecreasing([]float{1, 2}) -// a.IsNonDecreasing([]string{"a", "b"}) +// a.IsNonDecreasing([]int{1, 1, 2}) +// a.IsNonDecreasing([]float{1, 2}) +// a.IsNonDecreasing([]string{"a", "b"}) func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -753,9 +837,9 @@ func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface // IsNonDecreasingf asserts that the collection is not decreasing // -// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") -// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") -// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") +// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted") +// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted") func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -765,9 +849,9 @@ func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...in // IsNonIncreasing asserts that the collection is not increasing // -// a.IsNonIncreasing([]int{2, 1, 1}) -// a.IsNonIncreasing([]float{2, 1}) -// a.IsNonIncreasing([]string{"b", "a"}) +// a.IsNonIncreasing([]int{2, 1, 1}) +// a.IsNonIncreasing([]float{2, 1}) +// a.IsNonIncreasing([]string{"b", "a"}) func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -777,9 +861,9 @@ func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface // IsNonIncreasingf asserts that the collection is not increasing // -// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") -// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") -// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") +// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted") +// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted") func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -805,7 +889,7 @@ func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg s // JSONEq asserts that two JSON strings are equivalent. 
// -// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -815,7 +899,7 @@ func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interf // JSONEqf asserts that two JSON strings are equivalent. // -// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -826,7 +910,7 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // -// a.Len(mySlice, 3) +// a.Len(mySlice, 3) func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -837,7 +921,7 @@ func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // -// a.Lenf(mySlice, 3, "error message %s", "formatted") +// a.Lenf(mySlice, 3, "error message %s", "formatted") func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -847,9 +931,9 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in // Less asserts that the first element is less than the second // -// a.Less(1, 2) -// a.Less(float64(1), float64(2)) -// a.Less("a", "b") +// a.Less(1, 2) +// a.Less(float64(1), float64(2)) +// a.Less("a", "b") func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -859,10 +943,10 @@ func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interfac // LessOrEqual asserts that the first element is less than or equal to the second // -// a.LessOrEqual(1, 2) -// a.LessOrEqual(2, 2) -// a.LessOrEqual("a", "b") -// a.LessOrEqual("b", "b") +// a.LessOrEqual(1, 2) +// a.LessOrEqual(2, 2) +// a.LessOrEqual("a", "b") +// a.LessOrEqual("b", "b") func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -872,10 +956,10 @@ func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...i // LessOrEqualf asserts that the first element is less than or equal to the second // -// a.LessOrEqualf(1, 2, "error message %s", "formatted") -// a.LessOrEqualf(2, 2, "error message %s", "formatted") -// a.LessOrEqualf("a", "b", "error message %s", "formatted") -// a.LessOrEqualf("b", "b", "error message %s", "formatted") +// a.LessOrEqualf(1, 2, "error message %s", "formatted") +// a.LessOrEqualf(2, 2, "error message %s", "formatted") +// a.LessOrEqualf("a", "b", "error message %s", "formatted") +// a.LessOrEqualf("b", "b", "error message %s", "formatted") func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() 
@@ -885,9 +969,9 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar // Lessf asserts that the first element is less than the second // -// a.Lessf(1, 2, "error message %s", "formatted") -// a.Lessf(float64(1), float64(2), "error message %s", "formatted") -// a.Lessf("a", "b", "error message %s", "formatted") +// a.Lessf(1, 2, "error message %s", "formatted") +// a.Lessf(float64(1), float64(2), "error message %s", "formatted") +// a.Lessf("a", "b", "error message %s", "formatted") func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -897,8 +981,8 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i // Negative asserts that the specified element is negative // -// a.Negative(-1) -// a.Negative(-1.23) +// a.Negative(-1) +// a.Negative(-1.23) func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -908,8 +992,8 @@ func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool { // Negativef asserts that the specified element is negative // -// a.Negativef(-1, "error message %s", "formatted") -// a.Negativef(-1.23, "error message %s", "formatted") +// a.Negativef(-1, "error message %s", "formatted") +// a.Negativef(-1.23, "error message %s", "formatted") func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -920,7 +1004,7 @@ func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) b // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) +// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -931,7 +1015,7 @@ func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick ti // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -941,7 +1025,7 @@ func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick t // Nil asserts that the specified object is nil. // -// a.Nil(err) +// a.Nil(err) func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -951,7 +1035,7 @@ func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { // Nilf asserts that the specified object is nil. 
// -// a.Nilf(err, "error message %s", "formatted") +// a.Nilf(err, "error message %s", "formatted") func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -979,10 +1063,10 @@ func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) // NoError asserts that a function returned no error (i.e. `nil`). // -// actualObj, err := SomeFunction() -// if a.NoError(err) { -// assert.Equal(t, expectedObj, actualObj) -// } +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, expectedObj, actualObj) +// } func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -992,10 +1076,10 @@ func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { // NoErrorf asserts that a function returned no error (i.e. `nil`). // -// actualObj, err := SomeFunction() -// if a.NoErrorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } +// actualObj, err := SomeFunction() +// if a.NoErrorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1024,9 +1108,9 @@ func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// a.NotContains("Hello World", "Earth") -// a.NotContains(["Hello", "World"], "Earth") -// a.NotContains({"Hello": "World"}, "Earth") +// a.NotContains("Hello World", "Earth") +// a.NotContains(["Hello", "World"], "Earth") +// a.NotContains({"Hello": "World"}, "Earth") func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1037,9 +1121,9 @@ func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") -// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") -// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") +// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1050,9 +1134,9 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if a.NotEmpty(obj) { -// assert.Equal(t, "two", obj[1]) -// } +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1063,9 +1147,9 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo // NotEmptyf asserts that the specified object is NOT empty. I.e. 
not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if a.NotEmptyf(obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } +// if a.NotEmptyf(obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1075,7 +1159,7 @@ func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface // NotEqual asserts that the specified values are NOT equal. // -// a.NotEqual(obj1, obj2) +// a.NotEqual(obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1088,7 +1172,7 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr // NotEqualValues asserts that two objects are not equal even when converted to the same type // -// a.NotEqualValues(obj1, obj2) +// a.NotEqualValues(obj1, obj2) func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1098,7 +1182,7 @@ func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, ms // NotEqualValuesf asserts that two objects are not equal even when converted to the same type // -// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") +// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1108,7 +1192,7 @@ func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, m // NotEqualf asserts that the specified values are NOT equal. // -// a.NotEqualf(obj1, obj2, "error message %s", "formatted") +// a.NotEqualf(obj1, obj2, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1139,7 +1223,7 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in // NotNil asserts that the specified object is not nil. // -// a.NotNil(err) +// a.NotNil(err) func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1149,7 +1233,7 @@ func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool // NotNilf asserts that the specified object is not nil. // -// a.NotNilf(err, "error message %s", "formatted") +// a.NotNilf(err, "error message %s", "formatted") func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1159,7 +1243,7 @@ func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{} // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// a.NotPanics(func(){ RemainCalm() }) +// a.NotPanics(func(){ RemainCalm() }) func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1169,7 +1253,7 @@ func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. 
// -// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") +// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1179,8 +1263,8 @@ func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{} // NotRegexp asserts that a specified regexp does not match a string. // -// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") -// a.NotRegexp("^start", "it's not starting") +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1190,8 +1274,8 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in // NotRegexpf asserts that a specified regexp does not match a string. // -// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") +// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1201,7 +1285,7 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg // NotSame asserts that two pointers do not reference the same object. // -// a.NotSame(ptr1, ptr2) +// a.NotSame(ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1214,7 +1298,7 @@ func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArg // NotSamef asserts that two pointers do not reference the same object. // -// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") +// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1228,7 +1312,7 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri // NotSubset asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1239,7 +1323,7 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs // NotSubsetf asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). 
// -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1265,7 +1349,7 @@ func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bo // Panics asserts that the code inside the specified PanicTestFunc panics. // -// a.Panics(func(){ GoCrazy() }) +// a.Panics(func(){ GoCrazy() }) func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1277,7 +1361,7 @@ func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// a.PanicsWithError("crazy error", func(){ GoCrazy() }) +// a.PanicsWithError("crazy error", func(){ GoCrazy() }) func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1289,7 +1373,7 @@ func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndAr // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1300,7 +1384,7 @@ func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg str // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) +// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1311,7 +1395,7 @@ func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgA // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1321,7 +1405,7 @@ func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg // Panicsf asserts that the code inside the specified PanicTestFunc panics. 
// -// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") +// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1331,8 +1415,8 @@ func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) b // Positive asserts that the specified element is positive // -// a.Positive(1) -// a.Positive(1.23) +// a.Positive(1) +// a.Positive(1.23) func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1342,8 +1426,8 @@ func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool { // Positivef asserts that the specified element is positive // -// a.Positivef(1, "error message %s", "formatted") -// a.Positivef(1.23, "error message %s", "formatted") +// a.Positivef(1, "error message %s", "formatted") +// a.Positivef(1.23, "error message %s", "formatted") func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1353,8 +1437,8 @@ func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) b // Regexp asserts that a specified regexp matches a string. // -// a.Regexp(regexp.MustCompile("start"), "it's starting") -// a.Regexp("start...$", "it's not starting") +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1364,8 +1448,8 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter // Regexpf asserts that a specified regexp matches a string. // -// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") +// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1375,7 +1459,7 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args . // Same asserts that two pointers reference the same object. // -// a.Same(ptr1, ptr2) +// a.Same(ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1388,7 +1472,7 @@ func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs . // Samef asserts that two pointers reference the same object. // -// a.Samef(ptr1, ptr2, "error message %s", "formatted") +// a.Samef(ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1402,7 +1486,7 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, // Subset asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). 
// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1413,7 +1497,7 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... // Subsetf asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1423,7 +1507,7 @@ func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, a // True asserts that the specified value is true. // -// a.True(myBool) +// a.True(myBool) func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1433,7 +1517,7 @@ func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { // Truef asserts that the specified value is true. // -// a.Truef(myBool, "error message %s", "formatted") +// a.Truef(myBool, "error message %s", "formatted") func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1443,7 +1527,7 @@ func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { // WithinDuration asserts that the two times are within duration delta of each other. // -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1453,7 +1537,7 @@ func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta // WithinDurationf asserts that the two times are within duration delta of each other. // -// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1463,7 +1547,7 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta // WithinRange asserts that a time is within a time range (inclusive). // -// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1473,7 +1557,7 @@ func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Tim // WithinRangef asserts that a time is within a time range (inclusive). 
// -// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 759448783..00df62a05 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -46,36 +46,36 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // IsIncreasing asserts that the collection is increasing // -// assert.IsIncreasing(t, []int{1, 2, 3}) -// assert.IsIncreasing(t, []float{1, 2}) -// assert.IsIncreasing(t, []string{"a", "b"}) +// assert.IsIncreasing(t, []int{1, 2, 3}) +// assert.IsIncreasing(t, []float{1, 2}) +// assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing // -// assert.IsNonIncreasing(t, []int{2, 1, 1}) -// assert.IsNonIncreasing(t, []float{2, 1}) -// assert.IsNonIncreasing(t, []string{"b", "a"}) +// assert.IsNonIncreasing(t, []int{2, 1, 1}) +// assert.IsNonIncreasing(t, []float{2, 1}) +// assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing // -// assert.IsDecreasing(t, []int{2, 1, 0}) -// assert.IsDecreasing(t, []float{2, 1}) -// assert.IsDecreasing(t, []string{"b", "a"}) +// assert.IsDecreasing(t, []int{2, 1, 0}) +// assert.IsDecreasing(t, []float{2, 1}) +// assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing // -// assert.IsNonDecreasing(t, []int{1, 1, 2}) -// assert.IsNonDecreasing(t, []float{1, 2}) -// assert.IsNonDecreasing(t, []string{"a", "b"}) +// assert.IsNonDecreasing(t, []int{1, 1, 2}) +// assert.IsNonDecreasing(t, []float{1, 2}) +// assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) 
} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index fa1245b18..a55d1bba9 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,7 +8,6 @@ import ( "fmt" "math" "os" - "path/filepath" "reflect" "regexp" "runtime" @@ -76,6 +75,77 @@ func ObjectsAreEqual(expected, actual interface{}) bool { return bytes.Equal(exp, act) } +// copyExportedFields iterates downward through nested data structures and creates a copy +// that only contains the exported struct fields. +func copyExportedFields(expected interface{}) interface{} { + if isNil(expected) { + return expected + } + + expectedType := reflect.TypeOf(expected) + expectedKind := expectedType.Kind() + expectedValue := reflect.ValueOf(expected) + + switch expectedKind { + case reflect.Struct: + result := reflect.New(expectedType).Elem() + for i := 0; i < expectedType.NumField(); i++ { + field := expectedType.Field(i) + isExported := field.IsExported() + if isExported { + fieldValue := expectedValue.Field(i) + if isNil(fieldValue) || isNil(fieldValue.Interface()) { + continue + } + newValue := copyExportedFields(fieldValue.Interface()) + result.Field(i).Set(reflect.ValueOf(newValue)) + } + } + return result.Interface() + + case reflect.Ptr: + result := reflect.New(expectedType.Elem()) + unexportedRemoved := copyExportedFields(expectedValue.Elem().Interface()) + result.Elem().Set(reflect.ValueOf(unexportedRemoved)) + return result.Interface() + + case reflect.Array, reflect.Slice: + result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + for i := 0; i < expectedValue.Len(); i++ { + index := expectedValue.Index(i) + if isNil(index) { + continue + } + unexportedRemoved := copyExportedFields(index.Interface()) + result.Index(i).Set(reflect.ValueOf(unexportedRemoved)) + } + return result.Interface() + + case reflect.Map: + result := reflect.MakeMap(expectedType) + for _, k := range expectedValue.MapKeys() { + index := expectedValue.MapIndex(k) + unexportedRemoved := copyExportedFields(index.Interface()) + result.SetMapIndex(k, reflect.ValueOf(unexportedRemoved)) + } + return result.Interface() + + default: + return expected + } +} + +// ObjectsExportedFieldsAreEqual determines if the exported (public) fields of two objects are +// considered equal. This comparison of only exported fields is applied recursively to nested data +// structures. +// +// This function does no assertion of any kind. +func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { + expectedCleaned := copyExportedFields(expected) + actualCleaned := copyExportedFields(actual) + return ObjectsAreEqualValues(expectedCleaned, actualCleaned) +} + // ObjectsAreEqualValues gets whether two objects are equal, or if their // values are equal. 
func ObjectsAreEqualValues(expected, actual interface{}) bool { @@ -141,12 +211,11 @@ func CallerInfo() []string { } parts := strings.Split(file, "/") - file = parts[len(parts)-1] if len(parts) > 1 { + filename := parts[len(parts)-1] dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - path, _ := filepath.Abs(file) - callers = append(callers, fmt.Sprintf("%s:%d", path, line)) + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) } } @@ -273,7 +342,7 @@ type labeledContent struct { // labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: // -// \t{{label}}:{{align_spaces}}\t{{content}}\n +// \t{{label}}:{{align_spaces}}\t{{content}}\n // // The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. // If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this @@ -296,7 +365,7 @@ func labeledOutput(content ...labeledContent) string { // Implements asserts that an object is implemented by the specified interface. // -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -328,7 +397,7 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs // Equal asserts that two objects are equal. // -// assert.Equal(t, 123, 123) +// assert.Equal(t, 123, 123) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -369,7 +438,7 @@ func validateEqualArgs(expected, actual interface{}) error { // Same asserts that two pointers reference the same object. // -// assert.Same(t, ptr1, ptr2) +// assert.Same(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -389,7 +458,7 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b // NotSame asserts that two pointers do not reference the same object. // -// assert.NotSame(t, ptr1, ptr2) +// assert.NotSame(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -457,7 +526,7 @@ func truncatingFormat(data interface{}) string { // EqualValues asserts that two objects are equal or convertable to the same types // and equal. // -// assert.EqualValues(t, uint32(123), int32(123)) +// assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -475,9 +544,53 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa } +// EqualExportedValues asserts that the types of two objects are equal and their public +// fields are also equal. This is useful for comparing structs that have private fields +// that could potentially differ. 
+// +// type S struct { +// Exported int +// notExported int +// } +// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true +// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false +func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + aType := reflect.TypeOf(expected) + bType := reflect.TypeOf(actual) + + if aType != bType { + return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) + } + + if aType.Kind() != reflect.Struct { + return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) + } + + if bType.Kind() != reflect.Struct { + return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) + } + + expected = copyExportedFields(expected) + actual = copyExportedFields(actual) + + if !ObjectsAreEqualValues(expected, actual) { + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal (comparing only exported fields): \n"+ + "expected: %s\n"+ + "actual : %s%s", expected, actual, diff), msgAndArgs...) + } + + return true +} + // Exactly asserts that two objects are equal in value and type. // -// assert.Exactly(t, int32(123), int64(123)) +// assert.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -496,7 +609,7 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} // NotNil asserts that the specified object is not nil. // -// assert.NotNil(t, err) +// assert.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { if !isNil(object) { return true @@ -530,7 +643,7 @@ func isNil(object interface{}) bool { []reflect.Kind{ reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice}, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, kind) if isNilableKind && value.IsNil() { @@ -542,7 +655,7 @@ func isNil(object interface{}) bool { // Nil asserts that the specified object is nil. // -// assert.Nil(t, err) +// assert.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { if isNil(object) { return true @@ -585,7 +698,7 @@ func isEmpty(object interface{}) bool { // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Empty(t, obj) +// assert.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := isEmpty(object) if !pass { @@ -602,9 +715,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := !isEmpty(object) if !pass { @@ -633,7 +746,7 @@ func getLen(x interface{}) (ok bool, length int) { // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. 
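EqualExportedValues, added above together with the copyExportedFields and ObjectsExportedFieldsAreEqual helpers, compares two values of the same struct type while ignoring unexported fields. A small self-contained sketch of the documented behaviour; the result type and its field values are hypothetical:

	package example

	import (
		"testing"

		"github.com/stretchr/testify/assert"
	)

	type result struct {
		Code    int    // exported: compared
		Message string // exported: compared
		raw     string // unexported: ignored by the comparison
	}

	func TestExportedFieldsOnly(t *testing.T) {
		a := result{Code: 200, Message: "ok", raw: "cached"}
		b := result{Code: 200, Message: "ok", raw: ""}

		// Passes: the exported fields match even though raw differs.
		assert.EqualExportedValues(t, a, b)

		// ObjectsExportedFieldsAreEqual exposes the same comparison as a
		// plain bool without failing the test.
		if !assert.ObjectsExportedFieldsAreEqual(a, b) {
			t.Error("expected exported fields to be equal")
		}
	}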
// -// assert.Len(t, mySlice, 3) +// assert.Len(t, mySlice, 3) func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -651,7 +764,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // True asserts that the specified value is true. // -// assert.True(t, myBool) +// assert.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { if !value { if h, ok := t.(tHelper); ok { @@ -666,7 +779,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { // False asserts that the specified value is false. // -// assert.False(t, myBool) +// assert.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { if value { if h, ok := t.(tHelper); ok { @@ -681,7 +794,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { // NotEqual asserts that the specified values are NOT equal. // -// assert.NotEqual(t, obj1, obj2) +// assert.NotEqual(t, obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -704,7 +817,7 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ // NotEqualValues asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValues(t, obj1, obj2) +// assert.NotEqualValues(t, obj1, obj2) func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -763,9 +876,9 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) { // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") +// assert.Contains(t, "Hello World", "World") +// assert.Contains(t, ["Hello", "World"], "World") +// assert.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -786,9 +899,9 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") +// assert.NotContains(t, "Hello World", "Earth") +// assert.NotContains(t, ["Hello", "World"], "Earth") +// assert.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -796,10 +909,10 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) ok, found := containsElement(s, contains) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) } if found { - return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v should not contain %#v", s, contains), msgAndArgs...) 
} return true @@ -809,7 +922,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) // Subset asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -818,49 +931,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } + subsetKind := reflect.TypeOf(subset).Kind() if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) if subsetKind == reflect.Map && listKind == reflect.Map { - listValue := reflect.ValueOf(list) - subsetKeys := subsetValue.MapKeys() + subsetMap := reflect.ValueOf(subset) + actualMap := reflect.ValueOf(list) - for i := 0; i < len(subsetKeys); i++ { - subsetKey := subsetKeys[i] - subsetElement := subsetValue.MapIndex(subsetKey).Interface() - listElement := listValue.MapIndex(subsetKey).Interface() + for _, k := range subsetMap.MapKeys() { + ev := subsetMap.MapIndex(k) + av := actualMap.MapIndex(k) - if !ObjectsAreEqual(subsetElement, listElement) { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) + if !av.IsValid() { + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) + } + if !ObjectsAreEqual(ev.Interface(), av.Interface()) { + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) } } return true } - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() + subsetList := reflect.ValueOf(subset) + for i := 0; i < subsetList.Len(); i++ { + element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...) } if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...) } } @@ -870,7 +978,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok // NotSubset asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). 
// -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -879,34 +987,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) } - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } + subsetKind := reflect.TypeOf(subset).Kind() if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) if subsetKind == reflect.Map && listKind == reflect.Map { - listValue := reflect.ValueOf(list) - subsetKeys := subsetValue.MapKeys() + subsetMap := reflect.ValueOf(subset) + actualMap := reflect.ValueOf(list) - for i := 0; i < len(subsetKeys); i++ { - subsetKey := subsetKeys[i] - subsetElement := subsetValue.MapIndex(subsetKey).Interface() - listElement := listValue.MapIndex(subsetKey).Interface() + for _, k := range subsetMap.MapKeys() { + ev := subsetMap.MapIndex(k) + av := actualMap.MapIndex(k) - if !ObjectsAreEqual(subsetElement, listElement) { + if !av.IsValid() { + return true + } + if !ObjectsAreEqual(ev.Interface(), av.Interface()) { return true } } @@ -914,8 +1016,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) } - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() + subsetList := reflect.ValueOf(subset) + for i := 0; i < subsetList.Len(); i++ { + element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) @@ -1060,7 +1163,7 @@ func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string // Panics asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panics(t, func(){ GoCrazy() }) +// assert.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1076,7 +1179,7 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1097,7 +1200,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndAr // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. 
// -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) +// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1117,7 +1220,7 @@ func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs . // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanics(t, func(){ RemainCalm() }) +// assert.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1132,7 +1235,7 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { // WithinDuration asserts that the two times are within duration delta of each other. // -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1148,7 +1251,7 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, // WithinRange asserts that a time is within a time range (inclusive). // -// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1207,7 +1310,7 @@ func toFloat(x interface{}) (float64, bool) { // InDelta asserts that the two numerals are within delta of each other. // -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) +// assert.InDelta(t, math.Pi, 22/7.0, 0.01) func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1380,10 +1483,10 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m // NoError asserts that a function returned no error (i.e. `nil`). // -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) -// } +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, expectedObj, actualObj) +// } func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { if err != nil { if h, ok := t.(tHelper); ok { @@ -1397,10 +1500,10 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// if assert.Error(t, err) { +// assert.Equal(t, expectedError, err) +// } func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { if err == nil { if h, ok := t.(tHelper); ok { @@ -1415,8 +1518,8 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { // EqualError asserts that a function returned an error (i.e. not `nil`) // and that it is equal to the provided error. 
// -// actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1438,8 +1541,8 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // ErrorContains asserts that a function returned an error (i.e. not `nil`) // and that the error contains the specified substring. // -// actualObj, err := SomeFunction() -// assert.ErrorContains(t, err, expectedErrorSubString) +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1472,8 +1575,8 @@ func matchRegexp(rx interface{}, str interface{}) bool { // Regexp asserts that a specified regexp matches a string. // -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1490,8 +1593,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // NotRegexp asserts that a specified regexp does not match a string. // -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1603,7 +1706,7 @@ func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { // JSONEq asserts that two JSON strings are equivalent. // -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1726,7 +1829,7 @@ type tHelper interface { // Eventually asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1756,10 +1859,93 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t } } +// CollectT implements the TestingT interface and collects all errors. +type CollectT struct { + errors []error +} + +// Errorf collects the error. +func (c *CollectT) Errorf(format string, args ...interface{}) { + c.errors = append(c.errors, fmt.Errorf(format, args...)) +} + +// FailNow panics. +func (c *CollectT) FailNow() { + panic("Assertion failed") +} + +// Reset clears the collected errors. 
+func (c *CollectT) Reset() { + c.errors = nil +} + +// Copy copies the collected errors to the supplied t. +func (c *CollectT) Copy(t TestingT) { + if tt, ok := t.(tHelper); ok { + tt.Helper() + } + for _, err := range c.errors { + t.Errorf("%v", err) + } +} + +// EventuallyWithT asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. In contrast to Eventually, +// it supplies a CollectT to the condition function, so that the condition +// function can use the CollectT to call other assertions. +// The condition is considered "met" if no errors are raised in a tick. +// The supplied CollectT collects all errors from one tick (if there are any). +// If the condition is not met before waitFor, the collected errors of +// the last tick are copied to t. +// +// externalValue := false +// go func() { +// time.Sleep(8*time.Second) +// externalValue = true +// }() +// assert.EventuallyWithT(t, func(c *assert.CollectT) { +// // add assertions as needed; any assertion failure will fail the current tick +// assert.True(c, externalValue, "expected 'externalValue' to be true") +// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + collect := new(CollectT) + ch := make(chan bool, 1) + + timer := time.NewTimer(waitFor) + defer timer.Stop() + + ticker := time.NewTicker(tick) + defer ticker.Stop() + + for tick := ticker.C; ; { + select { + case <-timer.C: + collect.Copy(t) + return Fail(t, "Condition never satisfied", msgAndArgs...) + case <-tick: + tick = nil + collect.Reset() + go func() { + condition(collect) + ch <- len(collect.errors) == 0 + }() + case v := <-ch: + if v { + return true + } + tick = ticker.C + } + } +} + // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) +// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go index c9dccc4d6..4953981d3 100644 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -1,39 +1,40 @@ // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. 
// -// Example Usage +// # Example Usage // // The following is a complete example using assert in a standard test function: -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) // -// func TestSomething(t *testing.T) { +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) // -// var a string = "Hello" -// var b string = "Hello" +// func TestSomething(t *testing.T) { // -// assert.Equal(t, a, b, "The two words should be the same.") +// var a string = "Hello" +// var b string = "Hello" // -// } +// assert.Equal(t, a, b, "The two words should be the same.") +// +// } // // if you assert many times, use the format below: // -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) // -// func TestSomething(t *testing.T) { -// assert := assert.New(t) +// func TestSomething(t *testing.T) { +// assert := assert.New(t) // -// var a string = "Hello" -// var b string = "Hello" +// var a string = "Hello" +// var b string = "Hello" // -// assert.Equal(a, b, "The two words should be the same.") -// } +// assert.Equal(a, b, "The two words should be the same.") +// } // -// Assertions +// # Assertions // // Assertions allow you to easily write test code, and are global funcs in the `assert` package. // All assertion functions take, as the first argument, the `*testing.T` object provided by the diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index 4ed341dd2..d8038c28a 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -23,7 +23,7 @@ func httpCode(handler http.HandlerFunc, method, url string, values url.Values) ( // HTTPSuccess asserts that a specified handler returns a success status code. // -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { @@ -45,7 +45,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value // HTTPRedirect asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { @@ -67,7 +67,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu // HTTPError asserts that a specified handler returns an error status code. // -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). 
func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { @@ -89,7 +89,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values // HTTPStatusCode asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) +// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { @@ -124,7 +124,7 @@ func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) s // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { @@ -144,7 +144,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go index 7324128ef..d6b3c844c 100644 --- a/vendor/github.com/stretchr/testify/mock/doc.go +++ b/vendor/github.com/stretchr/testify/mock/doc.go @@ -1,17 +1,17 @@ // Package mock provides a system by which it is possible to mock your objects // and verify calls are happening as expected. // -// Example Usage +// # Example Usage // // The mock package provides an object, Mock, that tracks activity on another object. It is usually // embedded into a test object as shown below: // -// type MyTestObject struct { -// // add a Mock object instance -// mock.Mock +// type MyTestObject struct { +// // add a Mock object instance +// mock.Mock // -// // other fields go here as normal -// } +// // other fields go here as normal +// } // // When implementing the methods of an interface, you wire your functions up // to call the Mock.Called(args...) method, and return the appropriate values. 
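For illustration only (not part of the patch itself): the testify update carried by this diff adds assert.EventuallyWithT together with the CollectT error collector shown in the assertions.go hunks above. A minimal sketch of the call shape follows; the test name, the background goroutine, and the timing values are made up and only demonstrate how the new helper is driven.

package example_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestEventuallyWithTSketch(t *testing.T) {
	done := make(chan struct{})
	go func() {
		// Simulate some asynchronous work finishing after a short delay.
		time.Sleep(200 * time.Millisecond)
		close(done)
	}()

	assert.EventuallyWithT(t, func(c *assert.CollectT) {
		// Errors recorded on c fail only the current tick; if waitFor elapses,
		// the errors collected in the last tick are copied to t.
		select {
		case <-done:
		default:
			c.Errorf("background work not finished yet")
		}
	}, time.Second, 10*time.Millisecond)
}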
@@ -19,25 +19,25 @@ // For example, to mock a method that saves the name and age of a person and returns // the year of their birth or an error, you might write this: // -// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { -// args := o.Called(firstname, lastname, age) -// return args.Int(0), args.Error(1) -// } +// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { +// args := o.Called(firstname, lastname, age) +// return args.Int(0), args.Error(1) +// } // // The Int, Error and Bool methods are examples of strongly typed getters that take the argument // index position. Given this argument list: // -// (12, true, "Something") +// (12, true, "Something") // // You could read them out strongly typed like this: // -// args.Int(0) -// args.Bool(1) -// args.String(2) +// args.Int(0) +// args.Bool(1) +// args.String(2) // // For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: // -// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) +// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) // // This may cause a panic if the object you are getting is nil (the type assertion will fail), in those // cases you should check for nil first. diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index f0af8246c..f4b42e44f 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -3,6 +3,7 @@ package mock import ( "errors" "fmt" + "path" "reflect" "regexp" "runtime" @@ -13,6 +14,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" ) @@ -99,7 +101,7 @@ func (c *Call) unlock() { // Return specifies the return arguments for the expectation. // -// Mock.On("DoSomething").Return(errors.New("failed")) +// Mock.On("DoSomething").Return(errors.New("failed")) func (c *Call) Return(returnArguments ...interface{}) *Call { c.lock() defer c.unlock() @@ -111,7 +113,7 @@ func (c *Call) Return(returnArguments ...interface{}) *Call { // Panic specifies if the functon call should fail and the panic message // -// Mock.On("DoSomething").Panic("test panic") +// Mock.On("DoSomething").Panic("test panic") func (c *Call) Panic(msg string) *Call { c.lock() defer c.unlock() @@ -123,14 +125,14 @@ func (c *Call) Panic(msg string) *Call { // Once indicates that that the mock should only return the value once. // -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() func (c *Call) Once() *Call { return c.Times(1) } // Twice indicates that that the mock should only return the value twice. // -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() func (c *Call) Twice() *Call { return c.Times(2) } @@ -138,7 +140,7 @@ func (c *Call) Twice() *Call { // Times indicates that that the mock should only return the indicated number // of times. 
// -// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) func (c *Call) Times(i int) *Call { c.lock() defer c.unlock() @@ -149,7 +151,7 @@ func (c *Call) Times(i int) *Call { // WaitUntil sets the channel that will block the mock's return until its closed // or a message is received. // -// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) +// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) func (c *Call) WaitUntil(w <-chan time.Time) *Call { c.lock() defer c.unlock() @@ -159,7 +161,7 @@ func (c *Call) WaitUntil(w <-chan time.Time) *Call { // After sets how long to block until the call returns // -// Mock.On("MyMethod", arg1, arg2).After(time.Second) +// Mock.On("MyMethod", arg1, arg2).After(time.Second) func (c *Call) After(d time.Duration) *Call { c.lock() defer c.unlock() @@ -171,10 +173,10 @@ func (c *Call) After(d time.Duration) *Call { // mocking a method (such as an unmarshaler) that takes a pointer to a struct and // sets properties in such struct // -// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}")).Return().Run(func(args Arguments) { -// arg := args.Get(0).(*map[string]interface{}) -// arg["foo"] = "bar" -// }) +// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}")).Return().Run(func(args Arguments) { +// arg := args.Get(0).(*map[string]interface{}) +// arg["foo"] = "bar" +// }) func (c *Call) Run(fn func(args Arguments)) *Call { c.lock() defer c.unlock() @@ -194,16 +196,18 @@ func (c *Call) Maybe() *Call { // On chains a new expectation description onto the mocked interface. This // allows syntax like. // -// Mock. -// On("MyMethod", 1).Return(nil). -// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) +// Mock. +// On("MyMethod", 1).Return(nil). +// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) +// //go:noinline func (c *Call) On(methodName string, arguments ...interface{}) *Call { return c.Parent.On(methodName, arguments...) } // Unset removes a mock handler from being called. -// test.On("func", mock.Anything).Unset() +// +// test.On("func", mock.Anything).Unset() func (c *Call) Unset() *Call { var unlockOnce sync.Once @@ -218,16 +222,22 @@ func (c *Call) Unset() *Call { foundMatchingCall := false - for i, call := range c.Parent.ExpectedCalls { + // in-place filter slice for calls to be removed - iterate from 0'th to last skipping unnecessary ones + var index int // write index + for _, call := range c.Parent.ExpectedCalls { if call.Method == c.Method { _, diffCount := call.Arguments.Diff(c.Arguments) if diffCount == 0 { foundMatchingCall = true - // Remove from ExpectedCalls - c.Parent.ExpectedCalls = append(c.Parent.ExpectedCalls[:i], c.Parent.ExpectedCalls[i+1:]...) + // Remove from ExpectedCalls - just skip it + continue } } + c.Parent.ExpectedCalls[index] = call + index++ } + // trim slice up to last copied index + c.Parent.ExpectedCalls = c.Parent.ExpectedCalls[:index] if !foundMatchingCall { unlockOnce.Do(c.unlock) @@ -243,9 +253,9 @@ func (c *Call) Unset() *Call { // calls have been called as expected. The referenced calls may be from the // same mock instance and/or other mock instances. 
// -// Mock.On("Do").Return(nil).Notbefore( -// Mock.On("Init").Return(nil) -// ) +// Mock.On("Do").Return(nil).Notbefore( +// Mock.On("Init").Return(nil) +// ) func (c *Call) NotBefore(calls ...*Call) *Call { c.lock() defer c.unlock() @@ -328,7 +338,7 @@ func (m *Mock) fail(format string, args ...interface{}) { // On starts a description of an expectation of the specified method // being called. // -// Mock.On("MyMethod", arg1, arg2) +// Mock.On("MyMethod", arg1, arg2) func (m *Mock) On(methodName string, arguments ...interface{}) *Call { for _, arg := range arguments { if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { @@ -418,6 +428,10 @@ func callString(method string, arguments Arguments, includeArgumentValues bool) if includeArgumentValues { var argVals []string for argIndex, arg := range arguments { + if _, ok := arg.(*FunctionalOptionsArgument); ok { + argVals = append(argVals, fmt.Sprintf("%d: %s", argIndex, arg)) + continue + } argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg)) } argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) @@ -752,6 +766,7 @@ type AnythingOfTypeArgument string // name of the type to check for. Used in Diff and Assert. // // For example: +// // Assert(t, AnythingOfType("string"), AnythingOfType("int")) func AnythingOfType(t string) AnythingOfTypeArgument { return AnythingOfTypeArgument(t) @@ -774,6 +789,34 @@ func IsType(t interface{}) *IsTypeArgument { return &IsTypeArgument{t: t} } +// FunctionalOptionsArgument is a struct that contains the type and value of an functional option argument +// for use when type checking. +type FunctionalOptionsArgument struct { + value interface{} +} + +// String returns the string representation of FunctionalOptionsArgument +func (f *FunctionalOptionsArgument) String() string { + var name string + tValue := reflect.ValueOf(f.value) + if tValue.Len() > 0 { + name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + } + + return strings.Replace(fmt.Sprintf("%#v", f.value), "[]interface {}", name, 1) +} + +// FunctionalOptions returns an FunctionalOptionsArgument object containing the functional option type +// and the values to check of +// +// For example: +// Assert(t, FunctionalOptions("[]foo.FunctionalOption", foo.Opt1(), foo.Opt2())) +func FunctionalOptions(value ...interface{}) *FunctionalOptionsArgument { + return &FunctionalOptionsArgument{ + value: value, + } +} + // argumentMatcher performs custom argument matching, returning whether or // not the argument is matched by the expectation fixture function. 
type argumentMatcher struct { @@ -920,6 +963,29 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { differences++ output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt) } + } else if reflect.TypeOf(expected) == reflect.TypeOf((*FunctionalOptionsArgument)(nil)) { + t := expected.(*FunctionalOptionsArgument).value + + var name string + tValue := reflect.ValueOf(t) + if tValue.Len() > 0 { + name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + } + + tName := reflect.TypeOf(t).Name() + if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) + } else { + if ef, af := assertOpts(t, actual); ef == "" && af == "" { + // match + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef) + } + } } else { // normal checking @@ -1096,3 +1162,65 @@ var spewConfig = spew.ConfigState{ type tHelper interface { Helper() } + +func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) { + expectedOpts := reflect.ValueOf(expected) + actualOpts := reflect.ValueOf(actual) + var expectedNames []string + for i := 0; i < expectedOpts.Len(); i++ { + expectedNames = append(expectedNames, funcName(expectedOpts.Index(i).Interface())) + } + var actualNames []string + for i := 0; i < actualOpts.Len(); i++ { + actualNames = append(actualNames, funcName(actualOpts.Index(i).Interface())) + } + if !assert.ObjectsAreEqual(expectedNames, actualNames) { + expectedFmt = fmt.Sprintf("%v", expectedNames) + actualFmt = fmt.Sprintf("%v", actualNames) + return + } + + for i := 0; i < expectedOpts.Len(); i++ { + expectedOpt := expectedOpts.Index(i).Interface() + actualOpt := actualOpts.Index(i).Interface() + + expectedFunc := expectedNames[i] + actualFunc := actualNames[i] + if expectedFunc != actualFunc { + expectedFmt = expectedFunc + actualFmt = actualFunc + return + } + + ot := reflect.TypeOf(expectedOpt) + var expectedValues []reflect.Value + var actualValues []reflect.Value + if ot.NumIn() == 0 { + return + } + + for i := 0; i < ot.NumIn(); i++ { + vt := ot.In(i).Elem() + expectedValues = append(expectedValues, reflect.New(vt)) + actualValues = append(actualValues, reflect.New(vt)) + } + + reflect.ValueOf(expectedOpt).Call(expectedValues) + reflect.ValueOf(actualOpt).Call(actualValues) + + for i := 0; i < ot.NumIn(); i++ { + if !assert.ObjectsAreEqual(expectedValues[i].Interface(), actualValues[i].Interface()) { + expectedFmt = fmt.Sprintf("%s %+v", expectedNames[i], expectedValues[i].Interface()) + actualFmt = fmt.Sprintf("%s %+v", expectedNames[i], actualValues[i].Interface()) + return + } + } + } + + return "", "" +} + +func funcName(opt interface{}) string { + n := runtime.FuncForPC(reflect.ValueOf(opt).Pointer()).Name() + return strings.TrimSuffix(path.Base(n), path.Ext(n)) +} diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile index b3ce3df30..d896edc99 100644 --- a/vendor/go.opencensus.io/Makefile +++ b/vendor/go.opencensus.io/Makefile @@ -91,7 +91,7 @@ embedmd: .PHONY: install-tools install-tools: - go get -u golang.org/x/lint/golint - go get -u golang.org/x/tools/cmd/cover - go get -u golang.org/x/tools/cmd/goimports - go get -u github.com/rakyll/embedmd + go install 
golang.org/x/lint/golint@latest + go install golang.org/x/tools/cmd/cover@latest + go install golang.org/x/tools/cmd/goimports@latest + go install github.com/rakyll/embedmd@latest diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go index e5e4b4368..11e31f421 100644 --- a/vendor/go.opencensus.io/opencensus.go +++ b/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.23.0" + return "0.24.0" } diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go index c7ea64235..f7c8434be 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -31,14 +31,14 @@ import ( // Handler is an http.Handler wrapper to instrument your HTTP server with // OpenCensus. It supports both stats and tracing. // -// Tracing +// # Tracing // // This handler is aware of the incoming request's span, reading it from request // headers as configured using the Propagation field. // The extracted span can be accessed from the incoming request's // context. // -// span := trace.FromContext(r.Context()) +// span := trace.FromContext(r.Context()) // // The server span will be automatically ended at the end of ServeHTTP. type Handler struct { @@ -224,7 +224,9 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) { } // wrappedResponseWriter returns a wrapped version of the original -// ResponseWriter and only implements the same combination of additional +// +// ResponseWriter and only implements the same combination of additional +// // interfaces as the original. // This implementation is based on https://github.com/felixge/httpsnoop. func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go index 00d473ee0..31477a464 100644 --- a/vendor/go.opencensus.io/stats/doc.go +++ b/vendor/go.opencensus.io/stats/doc.go @@ -19,7 +19,7 @@ Package stats contains support for OpenCensus stats recording. OpenCensus allows users to create typed measures, record measurements, aggregate the collected data, and export the aggregated data. -Measures +# Measures A measure represents a type of data point to be tracked and recorded. For example, latency, request Mb/s, and response Mb/s are measures @@ -33,7 +33,7 @@ Libraries can define and export measures. Application authors can then create views and collect and break down measures by the tags they are interested in. -Recording measurements +# Recording measurements Measurement is a data point to be collected for a measure. For example, for a latency (ms) measure, 100 is a measurement that represents a 100ms @@ -49,7 +49,7 @@ Libraries can always record measurements, and applications can later decide on which measurements they want to collect by registering views. This allows libraries to turn on the instrumentation by default. -Exemplars +# Exemplars For a given recorded measurement, the associated exemplar is a diagnostic map that gives more information about the measurement. @@ -64,6 +64,5 @@ then the trace span will be added to the exemplar associated with the measuremen When exported to a supporting back end, you should be able to easily navigate to example traces that fell into each bucket in the Distribution. 
- */ package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go index 36935e629..436dc791f 100644 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -21,5 +21,11 @@ import ( // DefaultRecorder will be called for each Record call. var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) +// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but +// avoids interface{} conversion. +// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, +// but is interface{} here to avoid import loops +var MeasurementRecorder interface{} + // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index 2b9728346..8b5b99803 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -86,10 +86,29 @@ func createRecordOption(ros ...Options) *recordOptions { return o } +type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) + // Record records one or multiple measurements with the same context at once. // If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { - RecordWithOptions(ctx, WithMeasurements(ms...)) + // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality + // (RecordOptions) we can reduce some allocations to speed up this hot path + if len(ms) == 0 { + return + } + recorder := internal.MeasurementRecorder.(measurementRecorder) + record := false + for _, m := range ms { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return + } + recorder(tag.FromContext(ctx), ms, nil) + return } // RecordWithTags records one or multiple measurements at once. diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go index 748bd568c..61f72d20d 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -90,9 +90,9 @@ func Sum() *Aggregation { // // If len(bounds) >= 2 then the boundaries for bucket index i are: // -// [-infinity, bounds[i]) for i = 0 -// [bounds[i-1], bounds[i]) for 0 < i < length -// [bounds[i-1], +infinity) for i = length +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length // // If len(bounds) is 0 then there is no histogram associated with the // distribution. There will be a single bucket with boundaries diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go index ac22c93a2..bcd6e08c7 100644 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -59,8 +59,15 @@ func (c *collector) clearRows() { // encodeWithKeys encodes the map by using values // only associated with the keys provided. 
func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + // Compute the buffer length we will need ahead of time to avoid resizing later + reqLen := 0 + for _, k := range keys { + s, _ := m.Value(k) + // We will store each key + its length + reqLen += len(s) + 1 + } vb := &tagencoding.Values{ - Buffer: make([]byte, len(keys)), + Buffer: make([]byte, reqLen), } for _, k := range keys { v, _ := m.Value(k) diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go index 7bbedfe1f..60bf0e392 100644 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -34,7 +34,7 @@ // Libraries can define views but it is recommended that in most cases registering // views be left up to applications. // -// Exporting +// # Exporting // // Collected and aggregated data can be exported to a metric collection // backend by registering its exporter. diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 6e8d18b7f..6a79cd8a3 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -33,6 +33,7 @@ func init() { defaultWorker = NewMeter().(*worker) go defaultWorker.start() internal.DefaultRecorder = record + internal.MeasurementRecorder = recordMeasurement } type measureRef struct { @@ -199,11 +200,21 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { defaultWorker.Record(tags, ms, attachments) } +func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { + defaultWorker.recordMeasurement(tags, ms, attachments) +} + // Record records a set of measurements ms associated with the given tags and attachments. func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + w.recordMeasurement(tags, ms.([]stats.Measurement), attachments) +} + +// recordMeasurement records a set of measurements ms associated with the given tags and attachments. +// This is the same as Record but without an interface{} type to avoid allocations +func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { req := &recordReq{ tm: tags, - ms: ms.([]stats.Measurement), + ms: ms, attachments: attachments, t: time.Now(), } @@ -221,6 +232,11 @@ func SetReportingPeriod(d time.Duration) { defaultWorker.SetReportingPeriod(d) } +// Stop stops the default worker. +func Stop() { + defaultWorker.Stop() +} + // SetReportingPeriod sets the interval between reporting aggregated views in // the program. If duration is less than or equal to zero, it enables the // default behavior. @@ -281,7 +297,7 @@ func (w *worker) start() { case <-w.quit: w.timer.Stop() close(w.c) - w.done <- true + close(w.done) return } } @@ -290,8 +306,11 @@ func (w *worker) start() { func (w *worker) Stop() { prodMgr := metricproducer.GlobalManager() prodMgr.DeleteProducer(w) - - w.quit <- true + select { + case <-w.quit: + default: + close(w.quit) + } <-w.done } diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go index b34d95e34..8fb17226f 100644 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build go1.9 // +build go1.9 package tag diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go index 83adbce56..e28cf13cd 100644 --- a/vendor/go.opencensus.io/tag/profile_not19.go +++ b/vendor/go.opencensus.io/tag/profile_not19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package tag diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go index 04b1ee4f3..7a1616a55 100644 --- a/vendor/go.opencensus.io/trace/doc.go +++ b/vendor/go.opencensus.io/trace/doc.go @@ -18,24 +18,23 @@ Package trace contains support for OpenCensus distributed tracing. The following assumes a basic familiarity with OpenCensus concepts. See http://opencensus.io - -Exporting Traces +# Exporting Traces To export collected tracing data, register at least one exporter. You can use one of the provided exporters or write your own. - trace.RegisterExporter(exporter) + trace.RegisterExporter(exporter) By default, traces will be sampled relatively rarely. To change the sampling frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) Be careful about using trace.AlwaysSample in a production application with significant traffic: a new trace will be started and exported for every request. -Adding Spans to a Trace +# Adding Spans to a Trace A trace consists of a tree of spans. In Go, the current span is carried in a context.Context. @@ -44,8 +43,8 @@ It is common to want to capture all the activity of a function call in a span. F this to work, the function must take a context.Context as a parameter. Add these two lines to the top of the function: - ctx, span := trace.StartSpan(ctx, "example.com/Run") - defer span.End() + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() StartSpan will create a new top-level span if the context doesn't contain another span, otherwise it will create a child span. diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go index 908c2497e..80095a5f6 100644 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -44,7 +44,7 @@ func (lm lruMap) len() int { } func (lm lruMap) keys() []interface{} { - keys := make([]interface{}, len(lm.cacheKeys)) + keys := make([]interface{}, 0, len(lm.cacheKeys)) for k := range lm.cacheKeys { keys = append(keys, k) } diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go index b7d8aaf28..b8fc1e495 100644 --- a/vendor/go.opencensus.io/trace/trace_go11.go +++ b/vendor/go.opencensus.io/trace/trace_go11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.11 // +build go1.11 package trace diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go index e25419859..da488fc87 100644 --- a/vendor/go.opencensus.io/trace/trace_nongo11.go +++ b/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !go1.11 // +build !go1.11 package trace diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 8a237c5d6..a17b3cf69 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -151,9 +151,14 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S { // Delete removes the elements s[i:j] from s, returning the modified slice. // Delete panics if s[i:j] is not a valid slice of s. // Delete modifies the contents of the slice s; it does not create a new slice. -// Delete is O(len(s)-(j-i)), so if many items must be deleted, it is better to +// Delete is O(len(s)-j), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. +// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those +// elements contain pointers you might consider zeroing those elements so that +// objects they reference can be garbage collected. func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j] // bounds check + return append(s[:i], s[j:]...) } diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index 37dc0cfdb..000000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp // import "golang.org/x/net/context/ctxhttp" - -import ( - "context" - "io" - "net/http" - "net/url" - "strings" -) - -// Do sends an HTTP request with the provided http.Client and returns -// an HTTP response. -// -// If the client is nil, http.DefaultClient is used. -// -// The provided ctx must be non-nil. If it is canceled or times out, -// ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - resp, err := client.Do(req.WithContext(ctx)) - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err != nil { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - } - } - return resp, err -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. 
-func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/oauth2/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 1473e1296..781770c20 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -19,7 +19,7 @@ See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) * [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) -## Policy for new packages +## Policy for new endpoints We no longer accept new provider-specific packages in this repo if all they do is add a single endpoint variable. If you just want to add a @@ -29,8 +29,12 @@ package. ## Report Issues / Send Patches -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. - The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. In particular: + +* Excluding trivial changes, all contributions should be connected to an existing issue. +* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. +* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2). diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index 7ed02cd41..b3e8783cc 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -13,12 +13,15 @@ import ( "os" "path/filepath" "runtime" + "time" "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/authhandler" ) +const adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" + // Credentials holds Google credentials, including "Application Default Credentials". // For more details, see: // https://developers.google.com/accounts/docs/application-default-credentials @@ -62,6 +65,18 @@ type CredentialsParams struct { // PKCE is used to support PKCE flow. Optional for 3LO flow. PKCE *authhandler.PKCEParams + + // The OAuth2 TokenURL default override. This value overrides the default TokenURL, + // unless explicitly specified by the credentials config file. Optional. 
+ TokenURL string + + // EarlyTokenRefresh is the amount of time before a token expires that a new + // token will be preemptively fetched. If unset the default value is 10 + // seconds. + // + // Note: This option is currently only respected when using credentials + // fetched from the GCE metadata server. + EarlyTokenRefresh time.Duration } func (params CredentialsParams) deepCopy() CredentialsParams { @@ -137,7 +152,7 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) // and App Engine flexible use ComputeTokenSource and the metadata server. if appengineTokenFunc != nil { - return &DefaultCredentials{ + return &Credentials{ ProjectID: appengineAppIDFunc(ctx), TokenSource: AppEngineTokenSource(ctx, params.Scopes...), }, nil @@ -147,15 +162,14 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar // or App Engine flexible, use the metadata server. if metadata.OnGCE() { id, _ := metadata.ProjectID() - return &DefaultCredentials{ + return &Credentials{ ProjectID: id, - TokenSource: ComputeTokenSource("", params.Scopes...), + TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), }, nil } // None are found; return helpful error. - const url = "https://developers.google.com/accounts/docs/application-default-credentials" - return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information", adcSetupURL) } // FindDefaultCredentials invokes FindDefaultCredentialsWithParams with the specified scopes. @@ -194,7 +208,7 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params return nil, err } ts = newErrWrappingTokenSource(ts) - return &DefaultCredentials{ + return &Credentials{ ProjectID: f.ProjectID, TokenSource: ts, JSON: jsonData, @@ -216,7 +230,7 @@ func wellKnownFile() string { return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) } -func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*DefaultCredentials, error) { +func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*Credentials, error) { b, err := ioutil.ReadFile(filename) if err != nil { return nil, err diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index dddf65144..ca717634a 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -26,7 +26,7 @@ // // Using workload identity federation, your application can access Google Cloud // resources from Amazon Web Services (AWS), Microsoft Azure or any identity -// provider that supports OpenID Connect (OIDC). +// provider that supports OpenID Connect (OIDC) or SAML 2.0. // Traditionally, applications running outside Google Cloud have used service // account keys to access Google Cloud resources. Using identity federation, // you can allow your workload to impersonate a service account. 
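For illustration only (not part of the patch itself): this golang.org/x/oauth2 update adds the EarlyTokenRefresh field to google.CredentialsParams, as shown in the default.go hunk above; per its field comment it is currently only respected for credentials fetched from the GCE metadata server. A minimal sketch of wiring it up, assuming the cloud-platform scope and that Application Default Credentials are available where the program runs:

package main

import (
	"context"
	"log"
	"time"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()

	// Ask for tokens to be refreshed 30s before expiry; the scope below is an
	// assumption about what the caller needs.
	creds, err := google.FindDefaultCredentialsWithParams(ctx, google.CredentialsParams{
		Scopes:            []string{"https://www.googleapis.com/auth/cloud-platform"},
		EarlyTokenRefresh: 30 * time.Second,
	})
	if err != nil {
		log.Fatalf("loading default credentials: %v", err)
	}

	tok, err := creds.TokenSource.Token()
	if err != nil {
		log.Fatalf("fetching token: %v", err)
	}
	log.Printf("token expires at %s", tok.Expiry)
}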
@@ -36,20 +36,75 @@ // Follow the detailed instructions on how to configure Workload Identity Federation // in various platforms: // -// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws -// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure -// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc +// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws +// Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure +// OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc +// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml // -// For OIDC providers, the library can retrieve OIDC tokens either from a -// local file location (file-sourced credentials) or from a local server -// (URL-sourced credentials). +// For OIDC and SAML providers, the library can retrieve tokens in three ways: +// from a local file location (file-sourced credentials), from a server +// (URL-sourced credentials), or from a local executable (executable-sourced +// credentials). // For file-sourced credentials, a background process needs to be continuously -// refreshing the file location with a new OIDC token prior to expiration. +// refreshing the file location with a new OIDC/SAML token prior to expiration. // For tokens with one hour lifetimes, the token needs to be updated in the file // every hour. The token can be stored directly as plain text or in JSON format. // For URL-sourced credentials, a local server needs to host a GET endpoint to -// return the OIDC token. The response can be in plain text or JSON. +// return the OIDC/SAML token. The response can be in plain text or JSON. // Additional required request headers can also be specified. +// For executable-sourced credentials, an application needs to be available to +// output the OIDC/SAML token and other information in a JSON format. +// For more information on how these work (and how to implement +// executable-sourced credentials), please check out: +// https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration +// +// Note that this library does not perform any validation on the token_url, token_info_url, +// or service_account_impersonation_url fields of the credential configuration. +// It is not recommended to use a credential configuration that you did not generate with +// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. +// +// # Workforce Identity Federation +// +// Workforce identity federation lets you use an external identity provider (IdP) to +// authenticate and authorize a workforce—a group of users, such as employees, partners, +// and contractors—using IAM, so that the users can access Google Cloud services. +// Workforce identity federation extends Google Cloud's identity capabilities to support +// syncless, attribute-based single sign on. +// +// With workforce identity federation, your workforce can access Google Cloud resources +// using an external identity provider (IdP) that supports OpenID Connect (OIDC) or +// SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation +// Services (AD FS), Okta, and others. 
+// +// Follow the detailed instructions on how to configure Workload Identity Federation +// in various platforms: +// +// Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad +// Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta +// OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc +// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml +// +// For workforce identity federation, the library can retrieve tokens in three ways: +// from a local file location (file-sourced credentials), from a server +// (URL-sourced credentials), or from a local executable (executable-sourced +// credentials). +// For file-sourced credentials, a background process needs to be continuously +// refreshing the file location with a new OIDC/SAML token prior to expiration. +// For tokens with one hour lifetimes, the token needs to be updated in the file +// every hour. The token can be stored directly as plain text or in JSON format. +// For URL-sourced credentials, a local server needs to host a GET endpoint to +// return the OIDC/SAML token. The response can be in plain text or JSON. +// Additional required request headers can also be specified. +// For executable-sourced credentials, an application needs to be available to +// output the OIDC/SAML token and other information in a JSON format. +// For more information on how these work (and how to implement +// executable-sourced credentials), please check out: +// https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in +// +// Note that this library does not perform any validation on the token_url, token_info_url, +// or service_account_impersonation_url fields of the credential configuration. +// It is not recommended to use a credential configuration that you did not generate with +// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. // // # Credentials // diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index ceddd5dde..cc1223889 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -26,6 +26,9 @@ var Endpoint = oauth2.Endpoint{ AuthStyle: oauth2.AuthStyleInParams, } +// MTLSTokenURL is Google's OAuth 2.0 default mTLS endpoint. +const MTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" + // JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. 
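For context on the executable-sourced credentials described in the doc.go hunk above, here is a hedged sketch of a credential configuration loaded through google.CredentialsFromJSON. The audience, command, and paths are placeholders; the JSON field names mirror the struct tags that appear later in this diff (credential_source, executable, command, timeout_millis, output_file):

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// Placeholder audience, command, and file paths. timeout_millis must stay
	// within the 5s-120s range enforced by CreateExecutableCredential later in
	// this diff, and the executable only runs when
	// GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES=1 is set in the environment.
	cfg := []byte(`{
	  "type": "external_account",
	  "audience": "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/my-pool/providers/my-provider",
	  "subject_token_type": "urn:ietf:params:oauth:token-type:id_token",
	  "token_url": "https://sts.googleapis.com/v1/token",
	  "credential_source": {
	    "executable": {
	      "command": "/opt/bin/get-oidc-token",
	      "timeout_millis": 30000,
	      "output_file": "/var/run/oidc/token.json"
	    }
	  }
	}`)
	creds, err := google.CredentialsFromJSON(ctx, cfg, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	_ = creds
}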
const JWTTokenURL = "https://oauth2.googleapis.com/token" @@ -122,6 +125,7 @@ type credentialsFile struct { TokenURLExternal string `json:"token_url"` TokenInfoURL string `json:"token_info_url"` ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + ServiceAccountImpersonation serviceAccountImpersonationInfo `json:"service_account_impersonation"` Delegates []string `json:"delegates"` CredentialSource externalaccount.CredentialSource `json:"credential_source"` QuotaProjectID string `json:"quota_project_id"` @@ -131,6 +135,10 @@ type credentialsFile struct { SourceCredentials *credentialsFile `json:"source_credentials"` } +type serviceAccountImpersonationInfo struct { + TokenLifetimeSeconds int `json:"token_lifetime_seconds"` +} + func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config { cfg := &jwt.Config{ Email: f.ClientEmail, @@ -167,7 +175,11 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar cfg.Endpoint.AuthURL = Endpoint.AuthURL } if cfg.Endpoint.TokenURL == "" { - cfg.Endpoint.TokenURL = Endpoint.TokenURL + if params.TokenURL != "" { + cfg.Endpoint.TokenURL = params.TokenURL + } else { + cfg.Endpoint.TokenURL = Endpoint.TokenURL + } } tok := &oauth2.Token{RefreshToken: f.RefreshToken} return cfg.TokenSource(ctx, tok), nil @@ -178,12 +190,13 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar TokenURL: f.TokenURLExternal, TokenInfoURL: f.TokenInfoURL, ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL, - ClientSecret: f.ClientSecret, - ClientID: f.ClientID, - CredentialSource: f.CredentialSource, - QuotaProjectID: f.QuotaProjectID, - Scopes: params.Scopes, - WorkforcePoolUserProject: f.WorkforcePoolUserProject, + ServiceAccountImpersonationLifetimeSeconds: f.ServiceAccountImpersonation.TokenLifetimeSeconds, + ClientSecret: f.ClientSecret, + ClientID: f.ClientID, + CredentialSource: f.CredentialSource, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + WorkforcePoolUserProject: f.WorkforcePoolUserProject, } return cfg.TokenSource(ctx) case impersonatedServiceAccount: @@ -218,7 +231,11 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope}) + return computeTokenSource(account, 0, scope...) +} + +func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { + return oauth2.ReuseTokenSourceWithExpiry(nil, computeSource{account: account, scopes: scope}, earlyExpiry) } type computeSource struct { diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index e917195d5..2bf3202b2 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -62,6 +62,13 @@ const ( // The AWS authorization header name for the auto-generated date. awsDateHeader = "x-amz-date" + // Supported AWS configuration environment variables. 
+ awsAccessKeyId = "AWS_ACCESS_KEY_ID" + awsDefaultRegion = "AWS_DEFAULT_REGION" + awsRegion = "AWS_REGION" + awsSecretAccessKey = "AWS_SECRET_ACCESS_KEY" + awsSessionToken = "AWS_SESSION_TOKEN" + awsTimeFormatLong = "20060102T150405Z" awsTimeFormatShort = "20060102" ) @@ -267,6 +274,49 @@ type awsRequest struct { Headers []awsRequestHeader `json:"headers"` } +func (cs awsCredentialSource) validateMetadataServers() error { + if err := cs.validateMetadataServer(cs.RegionURL, "region_url"); err != nil { + return err + } + if err := cs.validateMetadataServer(cs.CredVerificationURL, "url"); err != nil { + return err + } + return cs.validateMetadataServer(cs.IMDSv2SessionTokenURL, "imdsv2_session_token_url") +} + +var validHostnames []string = []string{"169.254.169.254", "fd00:ec2::254"} + +func (cs awsCredentialSource) isValidMetadataServer(metadataUrl string) bool { + if metadataUrl == "" { + // Zero value means use default, which is valid. + return true + } + + u, err := url.Parse(metadataUrl) + if err != nil { + // Unparseable URL means invalid + return false + } + + for _, validHostname := range validHostnames { + if u.Hostname() == validHostname { + // If it's one of the valid hostnames, everything is good + return true + } + } + + // hostname not found in our allowlist, so not valid + return false +} + +func (cs awsCredentialSource) validateMetadataServer(metadataUrl, urlName string) error { + if !cs.isValidMetadataServer(metadataUrl) { + return fmt.Errorf("oauth2/google: invalid hostname %s for %s", metadataUrl, urlName) + } + + return nil +} + func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { if cs.client == nil { cs.client = oauth2.NewClient(cs.ctx, nil) @@ -274,16 +324,33 @@ func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, erro return cs.client.Do(req.WithContext(cs.ctx)) } +func canRetrieveRegionFromEnvironment() bool { + // The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is + // required. + return getenv(awsRegion) != "" || getenv(awsDefaultRegion) != "" +} + +func canRetrieveSecurityCredentialFromEnvironment() bool { + // Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. 
+ return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != "" +} + +func shouldUseMetadataServer() bool { + return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() +} + func (cs awsCredentialSource) subjectToken() (string, error) { if cs.requestSigner == nil { - awsSessionToken, err := cs.getAWSSessionToken() - if err != nil { - return "", err - } - headers := make(map[string]string) - if awsSessionToken != "" { - headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + if shouldUseMetadataServer() { + awsSessionToken, err := cs.getAWSSessionToken() + if err != nil { + return "", err + } + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } } awsSecurityCredentials, err := cs.getSecurityCredentials(headers) @@ -389,11 +456,11 @@ func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { } func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { - if envAwsRegion := getenv("AWS_REGION"); envAwsRegion != "" { - return envAwsRegion, nil - } - if envAwsRegion := getenv("AWS_DEFAULT_REGION"); envAwsRegion != "" { - return envAwsRegion, nil + if canRetrieveRegionFromEnvironment() { + if envAwsRegion := getenv(awsRegion); envAwsRegion != "" { + return envAwsRegion, nil + } + return getenv("AWS_DEFAULT_REGION"), nil } if cs.RegionURL == "" { @@ -434,14 +501,12 @@ func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, err } func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) { - if accessKeyID := getenv("AWS_ACCESS_KEY_ID"); accessKeyID != "" { - if secretAccessKey := getenv("AWS_SECRET_ACCESS_KEY"); secretAccessKey != "" { - return awsSecurityCredentials{ - AccessKeyID: accessKeyID, - SecretAccessKey: secretAccessKey, - SecurityToken: getenv("AWS_SESSION_TOKEN"), - }, nil - } + if canRetrieveSecurityCredentialFromEnvironment() { + return awsSecurityCredentials{ + AccessKeyID: getenv(awsAccessKeyId), + SecretAccessKey: getenv(awsSecretAccessKey), + SecurityToken: getenv(awsSessionToken), + }, nil } roleName, err := cs.getMetadataRoleName(headers) diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index 83ce9c245..dcd252a61 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -39,6 +39,9 @@ type Config struct { // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only // required for workload identity pools when APIs to be accessed have not integrated with UberMint. ServiceAccountImpersonationURL string + // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation + // token will be valid for. + ServiceAccountImpersonationLifetimeSeconds int // ClientSecret is currently only required if token_info endpoint also // needs to be called with the generated GCP access token. When provided, STS will be // called with additional basic authentication using client_id as username and client_secret as password. @@ -64,20 +67,6 @@ type Config struct { // that include all elements in a given list, in that order. 
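A standalone restatement (not the library's API) of the environment-first precedence the aws.go helpers above implement; it only illustrates when the metadata server would still be consulted:

package main

import (
	"fmt"
	"os"
)

func main() {
	// AWS_REGION wins over AWS_DEFAULT_REGION; either satisfies the region check.
	haveRegion := os.Getenv("AWS_REGION") != "" || os.Getenv("AWS_DEFAULT_REGION") != ""
	// Both the key ID and the secret must be present; AWS_SESSION_TOKEN stays optional.
	haveCreds := os.Getenv("AWS_ACCESS_KEY_ID") != "" && os.Getenv("AWS_SECRET_ACCESS_KEY") != ""

	// Mirrors shouldUseMetadataServer above: IMDS is only contacted when
	// something is missing from the environment.
	fmt.Println("use metadata server:", !haveRegion || !haveCreds)
}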
var ( - validTokenURLPatterns = []*regexp.Regexp{ - // The complicated part in the middle matches any number of characters that - // aren't period, spaces, or slashes. - regexp.MustCompile(`(?i)^[^\.\s\/\\]+\.sts\.googleapis\.com$`), - regexp.MustCompile(`(?i)^sts\.googleapis\.com$`), - regexp.MustCompile(`(?i)^sts\.[^\.\s\/\\]+\.googleapis\.com$`), - regexp.MustCompile(`(?i)^[^\.\s\/\\]+-sts\.googleapis\.com$`), - } - validImpersonateURLPatterns = []*regexp.Regexp{ - regexp.MustCompile(`^[^\.\s\/\\]+\.iamcredentials\.googleapis\.com$`), - regexp.MustCompile(`^iamcredentials\.googleapis\.com$`), - regexp.MustCompile(`^iamcredentials\.[^\.\s\/\\]+\.googleapis\.com$`), - regexp.MustCompile(`^[^\.\s\/\\]+-iamcredentials\.googleapis\.com$`), - } validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) ) @@ -105,25 +94,13 @@ func validateWorkforceAudience(input string) bool { // TokenSource Returns an external account TokenSource struct. This is to be called by package google to construct a google.Credentials. func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { - return c.tokenSource(ctx, validTokenURLPatterns, validImpersonateURLPatterns, "https") + return c.tokenSource(ctx, "https") } // tokenSource is a private function that's directly called by some of the tests, // because the unit test URLs are mocked, and would otherwise fail the // validity check. -func (c *Config) tokenSource(ctx context.Context, tokenURLValidPats []*regexp.Regexp, impersonateURLValidPats []*regexp.Regexp, scheme string) (oauth2.TokenSource, error) { - valid := validateURL(c.TokenURL, tokenURLValidPats, scheme) - if !valid { - return nil, fmt.Errorf("oauth2/google: invalid TokenURL provided while constructing tokenSource") - } - - if c.ServiceAccountImpersonationURL != "" { - valid := validateURL(c.ServiceAccountImpersonationURL, impersonateURLValidPats, scheme) - if !valid { - return nil, fmt.Errorf("oauth2/google: invalid ServiceAccountImpersonationURL provided while constructing tokenSource") - } - } - +func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) { if c.WorkforcePoolUserProject != "" { valid := validateWorkforceAudience(c.Audience) if !valid { @@ -141,10 +118,11 @@ func (c *Config) tokenSource(ctx context.Context, tokenURLValidPats []*regexp.Re scopes := c.Scopes ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} imp := ImpersonateTokenSource{ - Ctx: ctx, - URL: c.ServiceAccountImpersonationURL, - Scopes: scopes, - Ts: oauth2.ReuseTokenSource(nil, ts), + Ctx: ctx, + URL: c.ServiceAccountImpersonationURL, + Scopes: scopes, + Ts: oauth2.ReuseTokenSource(nil, ts), + TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds, } return oauth2.ReuseTokenSource(nil, imp), nil } @@ -163,7 +141,7 @@ type format struct { } // CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. -// Either the File or the URL field should be filled, depending on the kind of credential in question. +// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question. // The EnvironmentID should start with AWS if being used for an AWS credential. 
type CredentialSource struct { File string `json:"file"` @@ -171,6 +149,8 @@ type CredentialSource struct { URL string `json:"url"` Headers map[string]string `json:"headers"` + Executable *ExecutableConfig `json:"executable"` + EnvironmentID string `json:"environment_id"` RegionURL string `json:"region_url"` RegionalCredVerificationURL string `json:"regional_cred_verification_url"` @@ -179,7 +159,13 @@ type CredentialSource struct { Format format `json:"format"` } -// parse determines the type of CredentialSource needed +type ExecutableConfig struct { + Command string `json:"command"` + TimeoutMillis *int `json:"timeout_millis"` + OutputFile string `json:"output_file"` +} + +// parse determines the type of CredentialSource needed. func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { @@ -199,12 +185,18 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL } + if err := awsCredSource.validateMetadataServers(); err != nil { + return nil, err + } + return awsCredSource, nil } } else if c.CredentialSource.File != "" { return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil } else if c.CredentialSource.URL != "" { return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil + } else if c.CredentialSource.Executable != nil { + return CreateExecutableCredential(ctx, c.CredentialSource.Executable, c) } return nil, fmt.Errorf("oauth2/google: unable to parse credential source") } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go new file mode 100644 index 000000000..579bcce5f --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go @@ -0,0 +1,309 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "regexp" + "strings" + "time" +) + +var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken") + +const ( + executableSupportedMaxVersion = 1 + defaultTimeout = 30 * time.Second + timeoutMinimum = 5 * time.Second + timeoutMaximum = 120 * time.Second + executableSource = "response" + outputFileSource = "output file" +) + +type nonCacheableError struct { + message string +} + +func (nce nonCacheableError) Error() string { + return nce.message +} + +func missingFieldError(source, field string) error { + return fmt.Errorf("oauth2/google: %v missing `%q` field", source, field) +} + +func jsonParsingError(source, data string) error { + return fmt.Errorf("oauth2/google: unable to parse %v\nResponse: %v", source, data) +} + +func malformedFailureError() error { + return nonCacheableError{"oauth2/google: response must include `error` and `message` fields when unsuccessful"} +} + +func userDefinedError(code, message string) error { + return nonCacheableError{fmt.Sprintf("oauth2/google: response contains unsuccessful response: (%v) %v", code, message)} +} + +func unsupportedVersionError(source string, version int) error { + return fmt.Errorf("oauth2/google: %v contains unsupported version: %v", source, version) +} + +func tokenExpiredError() error { + return nonCacheableError{"oauth2/google: the token returned by the executable is expired"} +} + +func tokenTypeError(source string) error { + return fmt.Errorf("oauth2/google: %v contains unsupported token type", source) +} + +func exitCodeError(exitCode int) error { + return fmt.Errorf("oauth2/google: executable command failed with exit code %v", exitCode) +} + +func executableError(err error) error { + return fmt.Errorf("oauth2/google: executable command failed: %v", err) +} + +func executablesDisallowedError() error { + return errors.New("oauth2/google: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") +} + +func timeoutRangeError() error { + return errors.New("oauth2/google: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") +} + +func commandMissingError() error { + return errors.New("oauth2/google: missing `command` field — executable command must be provided") +} + +type environment interface { + existingEnv() []string + getenv(string) string + run(ctx context.Context, command string, env []string) ([]byte, error) + now() time.Time +} + +type runtimeEnvironment struct{} + +func (r runtimeEnvironment) existingEnv() []string { + return os.Environ() +} + +func (r runtimeEnvironment) getenv(key string) string { + return os.Getenv(key) +} + +func (r runtimeEnvironment) now() time.Time { + return time.Now().UTC() +} + +func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) { + splitCommand := strings.Fields(command) + cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...) 
+ cmd.Env = env + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + if ctx.Err() == context.DeadlineExceeded { + return nil, context.DeadlineExceeded + } + + if exitError, ok := err.(*exec.ExitError); ok { + return nil, exitCodeError(exitError.ExitCode()) + } + + return nil, executableError(err) + } + + bytesStdout := bytes.TrimSpace(stdout.Bytes()) + if len(bytesStdout) > 0 { + return bytesStdout, nil + } + return bytes.TrimSpace(stderr.Bytes()), nil +} + +type executableCredentialSource struct { + Command string + Timeout time.Duration + OutputFile string + ctx context.Context + config *Config + env environment +} + +// CreateExecutableCredential creates an executableCredentialSource given an ExecutableConfig. +// It also performs defaulting and type conversions. +func CreateExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { + if ec.Command == "" { + return executableCredentialSource{}, commandMissingError() + } + + result := executableCredentialSource{} + result.Command = ec.Command + if ec.TimeoutMillis == nil { + result.Timeout = defaultTimeout + } else { + result.Timeout = time.Duration(*ec.TimeoutMillis) * time.Millisecond + if result.Timeout < timeoutMinimum || result.Timeout > timeoutMaximum { + return executableCredentialSource{}, timeoutRangeError() + } + } + result.OutputFile = ec.OutputFile + result.ctx = ctx + result.config = config + result.env = runtimeEnvironment{} + return result, nil +} + +type executableResponse struct { + Version int `json:"version,omitempty"` + Success *bool `json:"success,omitempty"` + TokenType string `json:"token_type,omitempty"` + ExpirationTime int64 `json:"expiration_time,omitempty"` + IdToken string `json:"id_token,omitempty"` + SamlResponse string `json:"saml_response,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) { + var result executableResponse + if err := json.Unmarshal(response, &result); err != nil { + return "", jsonParsingError(source, string(response)) + } + + if result.Version == 0 { + return "", missingFieldError(source, "version") + } + + if result.Success == nil { + return "", missingFieldError(source, "success") + } + + if !*result.Success { + if result.Code == "" || result.Message == "" { + return "", malformedFailureError() + } + return "", userDefinedError(result.Code, result.Message) + } + + if result.Version > executableSupportedMaxVersion || result.Version < 0 { + return "", unsupportedVersionError(source, result.Version) + } + + if result.ExpirationTime == 0 && cs.OutputFile != "" { + return "", missingFieldError(source, "expiration_time") + } + + if result.TokenType == "" { + return "", missingFieldError(source, "token_type") + } + + if result.ExpirationTime != 0 && result.ExpirationTime < now { + return "", tokenExpiredError() + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:jwt" || result.TokenType == "urn:ietf:params:oauth:token-type:id_token" { + if result.IdToken == "" { + return "", missingFieldError(source, "id_token") + } + return result.IdToken, nil + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:saml2" { + if result.SamlResponse == "" { + return "", missingFieldError(source, "saml_response") + } + return result.SamlResponse, nil + } + + return "", tokenTypeError(source) 
+} + +func (cs executableCredentialSource) subjectToken() (string, error) { + if token, err := cs.getTokenFromOutputFile(); token != "" || err != nil { + return token, err + } + + return cs.getTokenFromExecutableCommand() +} + +func (cs executableCredentialSource) getTokenFromOutputFile() (token string, err error) { + if cs.OutputFile == "" { + // This ExecutableCredentialSource doesn't use an OutputFile. + return "", nil + } + + file, err := os.Open(cs.OutputFile) + if err != nil { + // No OutputFile found. Hasn't been created yet, so skip it. + return "", nil + } + defer file.Close() + + data, err := ioutil.ReadAll(io.LimitReader(file, 1<<20)) + if err != nil || len(data) == 0 { + // Cachefile exists, but no data found. Get new credential. + return "", nil + } + + token, err = cs.parseSubjectTokenFromSource(data, outputFileSource, cs.env.now().Unix()) + if err != nil { + if _, ok := err.(nonCacheableError); ok { + // If the cached token is expired we need a new token, + // and if the cache contains a failure, we need to try again. + return "", nil + } + + // There was an error in the cached token, and the developer should be aware of it. + return "", err + } + // Token parsing succeeded. Use found token. + return token, nil +} + +func (cs executableCredentialSource) executableEnvironment() []string { + result := cs.env.existingEnv() + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", cs.config.Audience)) + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", cs.config.SubjectTokenType)) + result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0") + if cs.config.ServiceAccountImpersonationURL != "" { + matches := serviceAccountImpersonationRE.FindStringSubmatch(cs.config.ServiceAccountImpersonationURL) + if matches != nil { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1])) + } + } + if cs.OutputFile != "" { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", cs.OutputFile)) + } + return result +} + +func (cs executableCredentialSource) getTokenFromExecutableCommand() (string, error) { + // For security reasons, we need our consumers to set this environment variable to allow executables to be run. + if cs.env.getenv("GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES") != "1" { + return "", executablesDisallowedError() + } + + ctx, cancel := context.WithDeadline(cs.ctx, cs.env.now().Add(cs.Timeout)) + defer cancel() + + output, err := cs.env.run(ctx, cs.Command, cs.executableEnvironment()) + if err != nil { + return "", err + } + return cs.parseSubjectTokenFromSource(output, executableSource, cs.env.now().Unix()) +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go index 8251fc85e..54c8f209f 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go @@ -48,12 +48,19 @@ type ImpersonateTokenSource struct { // Each service account must be granted roles/iam.serviceAccountTokenCreator // on the next service account in the chain. Optional. Delegates []string + // TokenLifetimeSeconds is the number of seconds the impersonation token will + // be valid for. + TokenLifetimeSeconds int } // Token performs the exchange to get a temporary service account token to allow access to GCP. 
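A minimal sketch of a token helper that satisfies the executableResponse contract parsed above; the token value is a placeholder, and a real helper would obtain a live OIDC/SAML token from its identity provider:

package main

import (
	"encoding/json"
	"os"
	"time"
)

func main() {
	resp := map[string]interface{}{
		"version":         1,
		"success":         true,
		"token_type":      "urn:ietf:params:oauth:token-type:id_token",
		"id_token":        "<subject token obtained from the external IdP>",
		"expiration_time": time.Now().Add(time.Hour).Unix(),
	}
	// The library reads stdout (or the configured output_file) and rejects
	// responses that are expired, unversioned, unsuccessful, or missing fields.
	json.NewEncoder(os.Stdout).Encode(resp)
}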
func (its ImpersonateTokenSource) Token() (*oauth2.Token, error) { + lifetimeString := "3600s" + if its.TokenLifetimeSeconds != 0 { + lifetimeString = fmt.Sprintf("%ds", its.TokenLifetimeSeconds) + } reqBody := generateAccessTokenReq{ - Lifetime: "3600s", + Lifetime: lifetimeString, Scope: its.Scopes, Delegates: its.Delegates, } diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 355c38696..b4723fcac 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -19,8 +19,6 @@ import ( "strings" "sync" "time" - - "golang.org/x/net/context/ctxhttp" ) // Token represents the credentials used to authorize @@ -229,7 +227,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, } func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { - r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) + r, err := ContextClient(ctx).Do(req.WithContext(ctx)) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go index 683d2d271..95015648b 100644 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -178,5 +178,5 @@ func Verify(token string, key *rsa.PublicKey) error { h := sha256.New() h.Write([]byte(signedContent)) - return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) } diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 291df5c83..9085fabe3 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -16,6 +16,7 @@ import ( "net/url" "strings" "sync" + "time" "golang.org/x/oauth2/internal" ) @@ -140,7 +141,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // // State is a token to protect the user from CSRF attacks. You must // always provide a non-empty string and validate that it matches the -// the state query parameter on your redirect callback. +// state query parameter on your redirect callback. // See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well @@ -290,6 +291,8 @@ type reuseTokenSource struct { mu sync.Mutex // guards t t *Token + + expiryDelta time.Duration } // Token returns the current token if it's still valid, else will @@ -305,6 +308,7 @@ func (s *reuseTokenSource) Token() (*Token, error) { if err != nil { return nil, err } + t.expiryDelta = s.expiryDelta s.t = t return t, nil } @@ -379,3 +383,30 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { new: src, } } + +// ReuseTokenSource returns a TokenSource that acts in the same manner as the +// TokenSource returned by ReuseTokenSource, except the expiry buffer is +// configurable. The expiration time of a token is calculated as +// t.Expiry.Add(-earlyExpiry). +func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly, but set the expiryDelta to earlyExpiry, + // so the behavior matches what the user expects. 
+ rt.expiryDelta = earlyExpiry + return rt + } + src = rt.new + } + if t != nil { + t.expiryDelta = earlyExpiry + } + return &reuseTokenSource{ + t: t, + new: src, + expiryDelta: earlyExpiry, + } +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 822720341..7c64006de 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -16,10 +16,10 @@ import ( "golang.org/x/oauth2/internal" ) -// expiryDelta determines how earlier a token should be considered +// defaultExpiryDelta determines how earlier a token should be considered // expired than its actual expiration time. It is used to avoid late // expirations due to client-server time mismatches. -const expiryDelta = 10 * time.Second +const defaultExpiryDelta = 10 * time.Second // Token represents the credentials used to authorize // the requests to access protected resources on the OAuth 2.0 @@ -52,6 +52,11 @@ type Token struct { // raw optionally contains extra metadata from the server // when updating a token. raw interface{} + + // expiryDelta is used to calculate when a token is considered + // expired, by subtracting from Expiry. If zero, defaultExpiryDelta + // is used. + expiryDelta time.Duration } // Type returns t.TokenType if non-empty, else "Bearer". @@ -127,6 +132,11 @@ func (t *Token) expired() bool { if t.Expiry.IsZero() { return false } + + expiryDelta := defaultExpiryDelta + if t.expiryDelta != 0 { + expiryDelta = t.expiryDelta + } return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow()) } diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/time/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/time/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 12e813cc7..f0e0cf3cb 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -80,6 +80,19 @@ func (lim *Limiter) Burst() int { return lim.burst } +// TokensAt returns the number of tokens available at time t. +func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, tokens := lim.advance(t) // does not mutate lim + lim.mu.Unlock() + return tokens +} + +// Tokens returns the number of tokens available now. +func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + // NewLimiter returns a new Limiter that allows events up to rate r and permits // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { @@ -89,16 +102,16 @@ func NewLimiter(r Limit, b int) *Limiter { } } -// Allow is shorthand for AllowN(time.Now(), 1). +// Allow reports whether an event may happen now. func (lim *Limiter) Allow() bool { return lim.AllowN(time.Now(), 1) } -// AllowN reports whether n events may happen at time now. +// AllowN reports whether n events may happen at time t. 
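A short usage sketch of the ReuseTokenSourceWithExpiry helper added above; the 30-second buffer is an arbitrary example value, and base stands in for any existing TokenSource (for instance one built from an oauth2.Config):

package tokenutil

import (
	"time"

	"golang.org/x/oauth2"
)

// EarlyRefreshingSource wraps base so cached tokens are treated as expired 30
// seconds before their real expiry (t.Expiry.Add(-30s)), forcing an earlier refresh.
func EarlyRefreshingSource(base oauth2.TokenSource) oauth2.TokenSource {
	return oauth2.ReuseTokenSourceWithExpiry(nil, base, 30*time.Second)
}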
// Use this method if you intend to drop / skip events that exceed the rate limit. // Otherwise use Reserve or Wait. -func (lim *Limiter) AllowN(now time.Time, n int) bool { - return lim.reserveN(now, n, 0).ok +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok } // A Reservation holds information about events that are permitted by a Limiter to happen after a delay. @@ -125,17 +138,17 @@ func (r *Reservation) Delay() time.Duration { } // InfDuration is the duration returned by Delay when a Reservation is not OK. -const InfDuration = time.Duration(1<<63 - 1) +const InfDuration = time.Duration(math.MaxInt64) // DelayFrom returns the duration for which the reservation holder must wait // before taking the reserved action. Zero duration means act immediately. // InfDuration means the limiter cannot grant the tokens requested in this // Reservation within the maximum wait time. -func (r *Reservation) DelayFrom(now time.Time) time.Duration { +func (r *Reservation) DelayFrom(t time.Time) time.Duration { if !r.ok { return InfDuration } - delay := r.timeToAct.Sub(now) + delay := r.timeToAct.Sub(t) if delay < 0 { return 0 } @@ -150,7 +163,7 @@ func (r *Reservation) Cancel() { // CancelAt indicates that the reservation holder will not perform the reserved action // and reverses the effects of this Reservation on the rate limit as much as possible, // considering that other reservations may have already been made. -func (r *Reservation) CancelAt(now time.Time) { +func (r *Reservation) CancelAt(t time.Time) { if !r.ok { return } @@ -158,7 +171,7 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.mu.Lock() defer r.lim.mu.Unlock() - if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { return } @@ -170,18 +183,18 @@ func (r *Reservation) CancelAt(now time.Time) { return } // advance time to now - now, _, tokens := r.lim.advance(now) + t, tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { tokens = burst } // update state - r.lim.last = now + r.lim.last = t r.lim.tokens = tokens if r.timeToAct == r.lim.lastEvent { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { + if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent } } @@ -208,8 +221,8 @@ func (lim *Limiter) Reserve() *Reservation { // Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. // If you need to respect a deadline or cancel the delay, use Wait instead. // To drop or skip events exceeding rate limit, use Allow instead. -func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { - r := lim.reserveN(now, n, InfDuration) +func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) return &r } @@ -234,7 +247,7 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { } // wait is the internal implementation of WaitN. 
-func (lim *Limiter) wait(ctx context.Context, n int, now time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { +func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { lim.mu.Lock() burst := lim.burst limit := lim.limit @@ -252,15 +265,15 @@ func (lim *Limiter) wait(ctx context.Context, n int, now time.Time, newTimer fun // Determine wait limit waitLimit := InfDuration if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(now) + waitLimit = deadline.Sub(t) } // Reserve - r := lim.reserveN(now, n, waitLimit) + r := lim.reserveN(t, n, waitLimit) if !r.ok { return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) } // Wait if necessary - delay := r.DelayFrom(now) + delay := r.DelayFrom(t) if delay == 0 { return nil } @@ -287,13 +300,13 @@ func (lim *Limiter) SetLimit(newLimit Limit) { // SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated // or underutilized by those which reserved (using Reserve or Wait) but did not yet act // before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.limit = newLimit } @@ -304,13 +317,13 @@ func (lim *Limiter) SetBurst(newBurst int) { } // SetBurstAt sets a new burst size for the limiter. -func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.burst = newBurst } @@ -318,7 +331,7 @@ func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { // reserveN is a helper method for AllowN, ReserveN, and WaitN. // maxFutureReserve specifies the maximum reservation wait duration allowed. // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. -func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { +func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { lim.mu.Lock() defer lim.mu.Unlock() @@ -327,7 +340,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: true, lim: lim, tokens: n, - timeToAct: now, + timeToAct: t, } } else if lim.limit == 0 { var ok bool @@ -339,11 +352,11 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: ok, lim: lim, tokens: lim.burst, - timeToAct: now, + timeToAct: t, } } - now, last, tokens := lim.advance(now) + t, tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. tokens -= float64(n) @@ -365,16 +378,12 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio } if ok { r.tokens = n - r.timeToAct = now.Add(waitDuration) - } + r.timeToAct = t.Add(waitDuration) - // Update state - if ok { - lim.last = now + // Update state + lim.last = t lim.tokens = tokens lim.lastEvent = r.timeToAct - } else { - lim.last = last } return r @@ -383,20 +392,20 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio // advance calculates and returns an updated state for lim resulting from the passage of time. 
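A small usage sketch of the Limiter surface touched above (AllowN's time argument is now named t, and the new Tokens/TokensAt accessors expose the current token count); the rate and burst values are arbitrary:

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	lim := rate.NewLimiter(rate.Every(100*time.Millisecond), 5) // 10 events/s, burst of 5

	for i := 0; i < 7; i++ {
		fmt.Printf("event %d allowed=%v tokens=%.2f\n", i, lim.Allow(), lim.Tokens())
	}

	// AllowN answers the same question for a batch of n events at time t.
	fmt.Println("batch of 3 now:", lim.AllowN(time.Now(), 3))
}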
// lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { last := lim.last - if now.Before(last) { - last = now + if t.Before(last) { + last = t } // Calculate the new number of tokens, due to time that passed. - elapsed := now.Sub(last) + elapsed := t.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens + return t, tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go new file mode 100644 index 000000000..6ba99ddb6 --- /dev/null +++ b/vendor/golang.org/x/time/rate/sometimes.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rate + +import ( + "sync" + "time" +) + +// Sometimes will perform an action occasionally. The First, Every, and +// Interval fields govern the behavior of Do, which performs the action. +// A zero Sometimes value will perform an action exactly once. +// +// # Example: logging with rate limiting +// +// var sometimes = rate.Sometimes{First: 3, Interval: 10*time.Second} +// func Spammy() { +// sometimes.Do(func() { log.Info("here I am!") }) +// } +type Sometimes struct { + First int // if non-zero, the first N calls to Do will run f. + Every int // if non-zero, every Nth call to Do will run f. + Interval time.Duration // if non-zero and Interval has elapsed since f's last run, Do will run f. + + mu sync.Mutex + count int // number of Do calls + last time.Time // last time f was run +} + +// Do runs the function f as allowed by First, Every, and Interval. +// +// The model is a union (not intersection) of filters. The first call to Do +// always runs f. Subsequent calls to Do run f if allowed by First or Every or +// Interval. +// +// A non-zero First:N causes the first N Do(f) calls to run f. +// +// A non-zero Every:M causes every Mth Do(f) call, starting with the first, to +// run f. +// +// A non-zero Interval causes Do(f) to run f if Interval has elapsed since +// Do last ran f. +// +// Specifying multiple filters produces the union of these execution streams. +// For example, specifying both First:N and Every:M causes the first N Do(f) +// calls and every Mth Do(f) call, starting with the first, to run f. See +// Examples for more. +// +// If Do is called multiple times simultaneously, the calls will block and run +// serially. Therefore, Do is intended for lightweight operations. +// +// Because a call to Do may block until f returns, if f causes Do to be called, +// it will deadlock. 
+func (s *Sometimes) Do(f func()) { + s.mu.Lock() + defer s.mu.Unlock() + if s.count == 0 || + (s.First > 0 && s.count < s.First) || + (s.Every > 0 && s.count%s.Every == 0) || + (s.Interval > 0 && time.Since(s.last) >= s.Interval) { + f() + s.last = time.Now() + } + s.count++ +} diff --git a/vendor/golang.org/x/tools/go/ssa/instantiate.go b/vendor/golang.org/x/tools/go/ssa/instantiate.go index f6b2533f2..38249dea2 100644 --- a/vendor/golang.org/x/tools/go/ssa/instantiate.go +++ b/vendor/golang.org/x/tools/go/ssa/instantiate.go @@ -148,7 +148,8 @@ func (insts *instanceSet) lookupOrCreate(targs []types.Type, parameterized *tpWa if prog.mode&InstantiateGenerics != 0 && concrete { synthetic = fmt.Sprintf("instance of %s", fn.Name()) - subst = makeSubster(prog.ctxt, fn.typeparams, targs, false) + scope := typeparams.OriginMethod(obj).Scope() + subst = makeSubster(prog.ctxt, scope, fn.typeparams, targs, false) } else { synthetic = fmt.Sprintf("instantiation wrapper of %s", fn.Name()) } diff --git a/vendor/golang.org/x/tools/go/ssa/parameterized.go b/vendor/golang.org/x/tools/go/ssa/parameterized.go index b11413c81..3fc4348fc 100644 --- a/vendor/golang.org/x/tools/go/ssa/parameterized.go +++ b/vendor/golang.org/x/tools/go/ssa/parameterized.go @@ -17,7 +17,7 @@ type tpWalker struct { seen map[types.Type]bool } -// isParameterized returns true when typ contains any type parameters. +// isParameterized returns true when typ reaches any type parameter. func (w *tpWalker) isParameterized(typ types.Type) (res bool) { // NOTE: Adapted from go/types/infer.go. Try to keep in sync. @@ -101,6 +101,7 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) { return true } } + return w.isParameterized(t.Underlying()) // recurse for types local to parameterized functions case *typeparams.TypeParam: return true diff --git a/vendor/golang.org/x/tools/go/ssa/ssa.go b/vendor/golang.org/x/tools/go/ssa/ssa.go index 5904b817b..c3471c156 100644 --- a/vendor/golang.org/x/tools/go/ssa/ssa.go +++ b/vendor/golang.org/x/tools/go/ssa/ssa.go @@ -36,7 +36,7 @@ type Program struct { bounds map[boundsKey]*Function // bounds for curried x.Method closures thunks map[selectionKey]*Function // thunks for T.Method expressions instances map[*Function]*instanceSet // instances of generic functions - parameterized tpWalker // determines whether a type is parameterized. + parameterized tpWalker // determines whether a type reaches a type parameter. } // A Package is a single analyzed Go package containing Members for diff --git a/vendor/golang.org/x/tools/go/ssa/subst.go b/vendor/golang.org/x/tools/go/ssa/subst.go index d7f8ae4a7..7efab3578 100644 --- a/vendor/golang.org/x/tools/go/ssa/subst.go +++ b/vendor/golang.org/x/tools/go/ssa/subst.go @@ -5,7 +5,6 @@ package ssa import ( - "fmt" "go/types" "golang.org/x/tools/internal/typeparams" @@ -19,41 +18,42 @@ import ( // // Not concurrency-safe. type subster struct { - // TODO(zpavlinovic): replacements can contain type params - // when generating instances inside of a generic function body. 
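Another brief usage sketch for the Sometimes type added above, complementing the First/Interval example in its doc comment by sampling every Nth call instead:

package main

import (
	"log"

	"golang.org/x/time/rate"
)

var sampled = rate.Sometimes{Every: 100} // runs f on the 1st, 101st, 201st, ... call

func handle(i int) {
	sampled.Do(func() { log.Printf("processed %d events so far", i) })
}

func main() {
	for i := 1; i <= 300; i++ {
		handle(i)
	}
}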
replacements map[*typeparams.TypeParam]types.Type // values should contain no type params cache map[types.Type]types.Type // cache of subst results - ctxt *typeparams.Context - debug bool // perform extra debugging checks + ctxt *typeparams.Context // cache for instantiation + scope *types.Scope // *types.Named declared within this scope can be substituted (optional) + debug bool // perform extra debugging checks // TODO(taking): consider adding Pos + // TODO(zpavlinovic): replacements can contain type params + // when generating instances inside of a generic function body. } // Returns a subster that replaces tparams[i] with targs[i]. Uses ctxt as a cache. // targs should not contain any types in tparams. -func makeSubster(ctxt *typeparams.Context, tparams *typeparams.TypeParamList, targs []types.Type, debug bool) *subster { +// scope is the (optional) lexical block of the generic function for which we are substituting. +func makeSubster(ctxt *typeparams.Context, scope *types.Scope, tparams *typeparams.TypeParamList, targs []types.Type, debug bool) *subster { assert(tparams.Len() == len(targs), "makeSubster argument count must match") subst := &subster{ replacements: make(map[*typeparams.TypeParam]types.Type, tparams.Len()), cache: make(map[types.Type]types.Type), ctxt: ctxt, + scope: scope, debug: debug, } for i := 0; i < tparams.Len(); i++ { subst.replacements[tparams.At(i)] = targs[i] } if subst.debug { - if err := subst.wellFormed(); err != nil { - panic(err) - } + subst.wellFormed() } return subst } -// wellFormed returns an error if subst was not properly initialized. -func (subst *subster) wellFormed() error { - if subst == nil || len(subst.replacements) == 0 { - return nil +// wellFormed asserts that subst was properly initialized. +func (subst *subster) wellFormed() { + if subst == nil { + return } // Check that all of the type params do not appear in the arguments. s := make(map[types.Type]bool, len(subst.replacements)) @@ -62,10 +62,9 @@ func (subst *subster) wellFormed() error { } for _, r := range subst.replacements { if reaches(r, s) { - return fmt.Errorf("\n‰r %s s %v replacements %v\n", r, s, subst.replacements) + panic(subst) } } - return nil } // typ returns the type of t with the type parameter tparams[i] substituted @@ -306,29 +305,56 @@ func (subst *subster) interface_(iface *types.Interface) *types.Interface { } func (subst *subster) named(t *types.Named) types.Type { - // A name type may be: - // (1) ordinary (no type parameters, no type arguments), - // (2) generic (type parameters but no type arguments), or - // (3) instantiated (type parameters and type arguments). + // A named type may be: + // (1) ordinary named type (non-local scope, no type parameters, no type arguments), + // (2) locally scoped type, + // (3) generic (type parameters but no type arguments), or + // (4) instantiated (type parameters and type arguments). tparams := typeparams.ForNamed(t) if tparams.Len() == 0 { - // case (1) ordinary + if subst.scope != nil && !subst.scope.Contains(t.Obj().Pos()) { + // Outside the current function scope? + return t // case (1) ordinary + } - // Note: If Go allows for local type declarations in generic - // functions we may need to descend into underlying as well. - return t + // case (2) locally scoped type. + // Create a new named type to represent this instantiation. 
+ // We assume that local types of distinct instantiations of a + // generic function are distinct, even if they don't refer to + // type parameters, but the spec is unclear; see golang/go#58573. + // + // Subtle: We short circuit substitution and use a newly created type in + // subst, i.e. cache[t]=n, to pre-emptively replace t with n in recursive + // types during traversal. This both breaks infinite cycles and allows for + // constructing types with the replacement applied in subst.typ(under). + // + // Example: + // func foo[T any]() { + // type linkedlist struct { + // next *linkedlist + // val T + // } + // } + // + // When the field `next *linkedlist` is visited during subst.typ(under), + // we want the substituted type for the field `next` to be `*n`. + n := types.NewNamed(t.Obj(), nil, nil) + subst.cache[t] = n + subst.cache[n] = n + n.SetUnderlying(subst.typ(t.Underlying())) + return n } targs := typeparams.NamedTypeArgs(t) // insts are arguments to instantiate using. insts := make([]types.Type, tparams.Len()) - // case (2) generic ==> targs.Len() == 0 + // case (3) generic ==> targs.Len() == 0 // Instantiating a generic with no type arguments should be unreachable. // Please report a bug if you encounter this. assert(targs.Len() != 0, "substition into a generic Named type is currently unsupported") - // case (3) instantiated. + // case (4) instantiated. // Substitute into the type arguments and instantiate the replacements/ // Example: // type N[A any] func() A @@ -378,19 +404,26 @@ func (subst *subster) signature(t *types.Signature) types.Type { } // reaches returns true if a type t reaches any type t' s.t. c[t'] == true. -// Updates c to cache results. +// It updates c to cache results. +// +// reaches is currently only part of the wellFormed debug logic, and +// in practice c is initially only type parameters. It is not currently +// relied on in production. func reaches(t types.Type, c map[types.Type]bool) (res bool) { if c, ok := c[t]; ok { return c } - c[t] = false // prevent cycles + + // c is populated with temporary false entries as types are visited. + // This avoids repeat visits and break cycles. + c[t] = false defer func() { c[t] = res }() switch t := t.(type) { case *typeparams.TypeParam, *types.Basic: - // no-op => c == false + return false case *types.Array: return reaches(t.Elem(), c) case *types.Slice: diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index c160acb68..be8f5a867 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -31,6 +31,8 @@ import ( "strings" "golang.org/x/tools/internal/typeparams" + + _ "unsafe" // for go:linkname ) // A Path is an opaque name that identifies a types.Object @@ -111,7 +113,7 @@ const ( opObj = 'O' // .Obj() (Named, TypeParam) ) -// The For function returns the path to an object relative to its package, +// For returns the path to an object relative to its package, // or an error if the object is not accessible from the package's Scope. // // The For function guarantees to return a path only for the following objects: @@ -144,6 +146,23 @@ const ( // // where p is the package (*types.Package) to which X belongs. func For(obj types.Object) (Path, error) { + return newEncoderFor()(obj) +} + +// An encoder amortizes the cost of encoding the paths of multiple objects. +// Nonexported pending approval of proposal 58668. 
+type encoder struct { + scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names() + namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() +} + +// Exposed to gopls via golang.org/x/tools/internal/typesinternal +// pending approval of proposal 58668. +// +//go:linkname newEncoderFor +func newEncoderFor() func(types.Object) (Path, error) { return new(encoder).For } + +func (enc *encoder) For(obj types.Object) (Path, error) { pkg := obj.Pkg() // This table lists the cases of interest. @@ -225,7 +244,7 @@ func For(obj types.Object) (Path, error) { return "", fmt.Errorf("func is not a method: %v", obj) } - if path, ok := concreteMethod(obj); ok { + if path, ok := enc.concreteMethod(obj); ok { // Fast path for concrete methods that avoids looping over scope. return path, nil } @@ -241,7 +260,7 @@ func For(obj types.Object) (Path, error) { // the best paths because non-types may // refer to types, but not the reverse. empty := make([]byte, 0, 48) // initial space - names := scope.Names() + names := enc.scopeNames(scope) for _, name := range names { o := scope.Lookup(name) tname, ok := o.(*types.TypeName) @@ -294,9 +313,7 @@ func For(obj types.Object) (Path, error) { // Note that method index here is always with respect // to canonical ordering of methods, regardless of how // they appear in the underlying type. - canonical := canonicalize(T) - for i := 0; i < len(canonical); i++ { - m := canonical[i] + for i, m := range enc.namedMethods(T) { path2 := appendOpArg(path, opMethod, i) if m == obj { return Path(path2), nil // found declared method @@ -324,7 +341,7 @@ func appendOpArg(path []byte, op byte, arg int) []byte { // This function is just an optimization that avoids the general scope walking // approach. You are expected to fall back to the general approach if this // function fails. -func concreteMethod(meth *types.Func) (Path, bool) { +func (enc *encoder) concreteMethod(meth *types.Func) (Path, bool) { // Concrete methods can only be declared on package-scoped named types. For // that reason we can skip the expensive walk over the package scope: the // path will always be package -> named type -> method. We can trivially get @@ -397,8 +414,7 @@ func concreteMethod(meth *types.Func) (Path, bool) { path := make([]byte, 0, len(name)+8) path = append(path, name...) 
path = append(path, opType) - canonical := canonicalize(named) - for i, m := range canonical { + for i, m := range enc.namedMethods(named) { if m == meth { path = appendOpArg(path, opMethod, i) return Path(path), true @@ -663,15 +679,23 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { t = nil case opMethod: - hasMethods, ok := t.(hasMethods) // Interface or Named - if !ok { + switch t := t.(type) { + case *types.Interface: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) // Id-ordered + + case *types.Named: + methods := namedMethods(t) // (unmemoized) + if index >= len(methods) { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods)) + } + obj = methods[index] // Id-ordered + + default: return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) } - canonical := canonicalize(hasMethods) - if n := len(canonical); index >= n { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n) - } - obj = canonical[index] t = nil case opObj: @@ -694,27 +718,45 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { return obj, nil // success } -// hasMethods is an abstraction of *types.{Interface,Named}. This is pulled up -// because it is used by methodOrdering, which is in turn used by both encoding -// and decoding. -type hasMethods interface { - Method(int) *types.Func - NumMethods() int +// namedMethods returns the methods of a Named type in ascending Id order. +func namedMethods(named *types.Named) []*types.Func { + methods := make([]*types.Func, named.NumMethods()) + for i := range methods { + methods[i] = named.Method(i) + } + sort.Slice(methods, func(i, j int) bool { + return methods[i].Id() < methods[j].Id() + }) + return methods } -// canonicalize returns a canonical order for the methods in a hasMethod. -func canonicalize(hm hasMethods) []*types.Func { - count := hm.NumMethods() - if count <= 0 { - return nil +// scopeNames is a memoization of scope.Names. Callers must not modify the result. +func (enc *encoder) scopeNames(scope *types.Scope) []string { + m := enc.scopeNamesMemo + if m == nil { + m = make(map[*types.Scope][]string) + enc.scopeNamesMemo = m + } + names, ok := m[scope] + if !ok { + names = scope.Names() // allocates and sorts + m[scope] = names } - canon := make([]*types.Func, count) - for i := 0; i < count; i++ { - canon[i] = hm.Method(i) + return names +} + +// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. 
+func (enc *encoder) namedMethods(named *types.Named) []*types.Func { + m := enc.namedMethodsMemo + if m == nil { + m = make(map[*types.Named][]*types.Func) + enc.namedMethodsMemo = m } - less := func(i, j int) bool { - return canon[i].Id() < canon[j].Id() + methods, ok := m[named] + if !ok { + methods = namedMethods(named) // allocates and sorts + m[named] = methods } - sort.Slice(canon, less) - return canon + return methods + } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 0372fb3a6..a973dece9 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -7,6 +7,18 @@ // Package gcimporter provides various functions for reading // gc-generated object files that can be used to implement the // Importer interface defined by the Go 1.5 standard library package. +// +// The encoding is deterministic: if the encoder is applied twice to +// the same types.Package data structure, both encodings are equal. +// This property may be important to avoid spurious changes in +// applications such as build systems. +// +// However, the encoder is not necessarily idempotent. Importing an +// exported package may yield a types.Package that, while it +// represents the same set of Go types as the original, may differ in +// the details of its internal representation. Because of these +// differences, re-encoding the imported package may yield a +// different, but equally valid, encoding of the package. package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index b285a11ce..34fc783f8 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -12,6 +12,7 @@ package gcimporter import ( "go/token" "go/types" + "sort" "strings" "golang.org/x/tools/internal/pkgbits" @@ -121,6 +122,16 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st iface.Complete() } + // Imports() of pkg are all of the transitive packages that were loaded. + var imps []*types.Package + for _, imp := range pr.pkgs { + if imp != nil && imp != pkg { + imps = append(imps, imp) + } + } + sort.Sort(byPath(imps)) + pkg.SetImports(imps) + pkg.MarkComplete() return pkg } @@ -260,39 +271,9 @@ func (r *reader) doPkg() *types.Package { pkg := types.NewPackage(path, name) r.p.imports[path] = pkg - imports := make([]*types.Package, r.Len()) - for i := range imports { - imports[i] = r.pkg() - } - pkg.SetImports(flattenImports(imports)) - return pkg } -// flattenImports returns the transitive closure of all imported -// packages rooted from pkgs. -func flattenImports(pkgs []*types.Package) []*types.Package { - var res []*types.Package - seen := make(map[*types.Package]struct{}) - for _, pkg := range pkgs { - if _, ok := seen[pkg]; ok { - continue - } - seen[pkg] = struct{}{} - res = append(res, pkg) - - // pkg.Imports() is already flattened. 
- for _, pkg := range pkg.Imports() { - if _, ok := seen[pkg]; ok { - continue - } - seen[pkg] = struct{}{} - res = append(res, pkg) - } - } - return res -} - // @@@ Types func (r *reader) typ() types.Type { diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index 25a1426d3..cfba8189f 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -87,7 +87,6 @@ func IsTypeParam(t types.Type) bool { func OriginMethod(fn *types.Func) *types.Func { recv := fn.Type().(*types.Signature).Recv() if recv == nil { - return fn } base := recv.Type() diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index ce7d4351b..3c53fbc63 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -11,6 +11,8 @@ import ( "go/types" "reflect" "unsafe" + + "golang.org/x/tools/go/types/objectpath" ) func SetUsesCgo(conf *types.Config) bool { @@ -50,3 +52,10 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, } var SetGoVersion = func(conf *types.Config, version string) bool { return false } + +// NewObjectpathEncoder returns a function closure equivalent to +// objectpath.For but amortized for multiple (sequential) calls. +// It is a temporary workaround, pending the approval of proposal 58668. +// +//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor +func NewObjectpathFunc() func(types.Object) (objectpath.Path, error) diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json index a837ca5f4..f6b5ecf47 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json @@ -1171,7 +1171,7 @@ } } }, - "revision": "20220710", + "revision": "20230129", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { @@ -1242,7 +1242,7 @@ "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. 
* `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", + "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. 
If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding.", "items": { "type": "string" }, diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index 214aaef0d..2e8496259 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC. +// Copyright 2023 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,35 +8,35 @@ // // For product documentation, see: https://cloud.google.com/resource-manager // -// Creating a client +// # Creating a client // // Usage example: // -// import "google.golang.org/api/cloudresourcemanager/v1" -// ... -// ctx := context.Background() -// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx) +// import "google.golang.org/api/cloudresourcemanager/v1" +// ... +// ctx := context.Background() +// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // -// Other authentication options +// # Other authentication options // // By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: // -// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithScopes(cloudresourcemanager.CloudPlatformReadOnlyScope)) +// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithScopes(cloudresourcemanager.CloudPlatformReadOnlyScope)) // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // -// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithAPIKey("AIza...")) +// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package cloudresourcemanager // import "google.golang.org/api/cloudresourcemanager/v1" @@ -345,19 +345,26 @@ type Binding struct { // `allUsers`: A special identifier that represents anyone who is on the // internet; with or without a Google account. * // `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. * - // `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . 
* - // `serviceAccount:{emailid}`: An email address that represents a - // service account. For example, - // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An - // email address that represents a Google group. For example, - // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An - // email address (plus unique identifier) representing a user that has - // been recently deleted. For example, - // `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered - // user retains the role in the binding. * + // who is authenticated with a Google account or a service account. Does + // not include identities that come from external identity providers + // (IdPs) through identity federation. * `user:{emailid}`: An email + // address that represents a specific Google account. For example, + // `alice@example.com` . * `serviceAccount:{emailid}`: An email address + // that represents a Google service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * + // `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: + // An identifier for a Kubernetes service account + // (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). + // For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. + // * `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. * `domain:{domain}`: The G Suite + // domain (primary) that represents all the users of that domain. For + // example, `google.com` or `example.com`. * + // `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a user that has been recently + // deleted. For example, `alice@example.com?uid=123456789012345678901`. + // If the user is recovered, this value reverts to `user:{emailid}` and + // the recovered user retains the role in the binding. * // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address // (plus unique identifier) representing a service account that has been // recently deleted. For example, @@ -369,9 +376,7 @@ type Binding struct { // that has been recently deleted. For example, // `admins@example.com?uid=123456789012345678901`. If the group is // recovered, this value reverts to `group:{emailid}` and the recovered - // group retains the role in the binding. * `domain:{domain}`: The G - // Suite domain (primary) that represents all the users of that domain. - // For example, `google.com` or `example.com`. + // group retains the role in the binding. Members []string `json:"members,omitempty"` // Role: Role that is assigned to the list of `members`, or principals. @@ -496,7 +501,8 @@ func (s *ClearOrgPolicyRequest) MarshalJSON() ([]byte, error) { } // CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation: -// Metadata describing a long running folder operation +// +// Metadata describing a long running folder operation type CloudresourcemanagerGoogleCloudResourcemanagerV2alpha1FolderOperation struct { // DestinationParent: The resource name of the folder or organization we // are either creating the folder under or moving the folder to. 
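Note on the generated-client hunks that follow: every Do method in the regenerated cloudresourcemanager client now routes its error returns through gensupport.WrapError instead of returning a bare *googleapi.Error literal or the raw googleapi.CheckResponse error. The sketch below only illustrates the shape of that change; wrapError and doCall are hypothetical stand-ins (gensupport is an internal package of google.golang.org/api and cannot be imported by outside code), and the only behavior assumed is that wrapping keeps the typed *googleapi.Error reachable via errors.As.

package main

import (
	"errors"
	"fmt"
	"net/http"

	"google.golang.org/api/googleapi"
)

// wrapError is a stand-in for gensupport.WrapError: it decorates the error
// while preserving the wrapped *googleapi.Error for errors.As / Unwrap.
func wrapError(err error) error {
	return fmt.Errorf("call failed: %w", err)
}

// doCall mirrors the shape of the generated Do methods after this change:
// error responses are wrapped instead of being returned as bare values.
func doCall(res *http.Response, err error) error {
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return wrapError(&googleapi.Error{Code: res.StatusCode, Header: res.Header})
	}
	if err != nil {
		return err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return wrapError(err)
	}
	return nil
}

func main() {
	res := &http.Response{StatusCode: http.StatusNotModified, Header: http.Header{}}
	err := doCall(res, nil)

	var apiErr *googleapi.Error
	fmt.Println(errors.As(err, &apiErr), apiErr.Code) // true 304: the typed error survives wrapping
}

The same substitution repeats mechanically across the Folders, Liens, Operations, Organizations, and Projects calls in the hunks below.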
@@ -2517,17 +2523,17 @@ func (c *FoldersClearOrgPolicyCall) Do(opts ...googleapi.CallOption) (*Empty, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -2588,8 +2594,8 @@ type FoldersGetEffectiveOrgPolicyCall struct { // computed `Policy` across multiple resources. Subtrees of Resource // Manager resource hierarchy with 'under:' prefix will not be expanded. // -// - resource: The name of the resource to start computing the effective -// `Policy`. +// - resource: The name of the resource to start computing the effective +// `Policy`. func (r *FoldersService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *FoldersGetEffectiveOrgPolicyCall { c := &FoldersGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -2664,17 +2670,17 @@ func (c *FoldersGetEffectiveOrgPolicyCall) Do(opts ...googleapi.CallOption) (*Or if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -2811,17 +2817,17 @@ func (c *FoldersGetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -2957,17 +2963,17 @@ func (c *FoldersListAvailableOrgPolicyConstraintsCall) Do(opts ...googleapi.Call if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListAvailableOrgPolicyConstraintsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -3122,17 +3128,17 @@ func (c *FoldersListOrgPoliciesCall) Do(opts ...googleapi.CallOption) (*ListOrgP if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListOrgPoliciesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -3289,17 +3295,17 @@ func (c *FoldersSetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: 
res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -3428,17 +3434,17 @@ func (c *LiensCreateCall) Do(opts ...googleapi.CallOption) (*Lien, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Lien{ ServerResponse: googleapi.ServerResponse{ @@ -3557,17 +3563,17 @@ func (c *LiensDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -3707,17 +3713,17 @@ func (c *LiensGetCall) Do(opts ...googleapi.CallOption) (*Lien, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Lien{ ServerResponse: googleapi.ServerResponse{ @@ -3877,17 +3883,17 @@ func (c *LiensListCall) Do(opts ...googleapi.CallOption) (*ListLiensResponse, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListLiensResponse{ ServerResponse: googleapi.ServerResponse{ @@ -4054,17 +4060,17 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -4194,17 +4200,17 @@ func (c *OrganizationsClearOrgPolicyCall) Do(opts ...googleapi.CallOption) (*Emp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -4262,10 +4268,10 @@ type OrganizationsGetCall struct { // Get: Fetches an Organization resource identified by the specified // resource name. 
// -// - name: The resource name of the Organization to fetch. This is the -// organization's relative path in the API, formatted as -// "organizations/[organizationId]". For example, -// "organizations/1234". +// - name: The resource name of the Organization to fetch. This is the +// organization's relative path in the API, formatted as +// "organizations/[organizationId]". For example, +// "organizations/1234". func (r *OrganizationsService) Get(name string) *OrganizationsGetCall { c := &OrganizationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4347,17 +4353,17 @@ func (c *OrganizationsGetCall) Do(opts ...googleapi.CallOption) (*Organization, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Organization{ ServerResponse: googleapi.ServerResponse{ @@ -4416,8 +4422,8 @@ type OrganizationsGetEffectiveOrgPolicyCall struct { // computed `Policy` across multiple resources. Subtrees of Resource // Manager resource hierarchy with 'under:' prefix will not be expanded. // -// - resource: The name of the resource to start computing the effective -// `Policy`. +// - resource: The name of the resource to start computing the effective +// `Policy`. func (r *OrganizationsService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *OrganizationsGetEffectiveOrgPolicyCall { c := &OrganizationsGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4492,17 +4498,17 @@ func (c *OrganizationsGetEffectiveOrgPolicyCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -4565,10 +4571,10 @@ type OrganizationsGetIamPolicyCall struct { // `resourcemanager.organizations.getIamPolicy` on the specified // organization // -// - resource: REQUIRED: The resource for which the policy is being -// requested. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy is being +// requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. 
func (r *OrganizationsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *OrganizationsGetIamPolicyCall { c := &OrganizationsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4643,17 +4649,17 @@ func (c *OrganizationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -4790,17 +4796,17 @@ func (c *OrganizationsGetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -4936,17 +4942,17 @@ func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) Do(opts ...googleap if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListAvailableOrgPolicyConstraintsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5101,17 +5107,17 @@ func (c *OrganizationsListOrgPoliciesCall) Do(opts ...googleapi.CallOption) (*Li if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListOrgPoliciesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5262,17 +5268,17 @@ func (c *OrganizationsSearchCall) Do(opts ...googleapi.CallOption) (*SearchOrgan if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SearchOrganizationsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5346,10 +5352,10 @@ type OrganizationsSetIamPolicyCall struct { // `resourcemanager.organizations.setIamPolicy` on the specified // organization // -// - resource: REQUIRED: The resource for which the policy is being -// specified. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy is being +// specified. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. 
func (r *OrganizationsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *OrganizationsSetIamPolicyCall { c := &OrganizationsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5424,17 +5430,17 @@ func (c *OrganizationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -5569,17 +5575,17 @@ func (c *OrganizationsSetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -5639,10 +5645,10 @@ type OrganizationsTestIamPermissionsCall struct { // organization's resource name, e.g. "organizations/123". There are no // permissions required for making this API call. // -// - resource: REQUIRED: The resource for which the policy detail is -// being requested. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy detail is +// being requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. 
func (r *OrganizationsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *OrganizationsTestIamPermissionsCall { c := &OrganizationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5717,17 +5723,17 @@ func (c *OrganizationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5860,17 +5866,17 @@ func (c *ProjectsClearOrgPolicyCall) Do(opts ...googleapi.CallOption) (*Empty, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -6006,17 +6012,17 @@ func (c *ProjectsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -6142,17 +6148,17 @@ func (c *ProjectsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -6289,17 +6295,17 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Project{ ServerResponse: googleapi.ServerResponse{ @@ -6431,17 +6437,17 @@ func (c *ProjectsGetAncestryCall) Do(opts ...googleapi.CallOption) (*GetAncestry if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &GetAncestryResponse{ ServerResponse: googleapi.ServerResponse{ @@ -6502,8 +6508,8 @@ type ProjectsGetEffectiveOrgPolicyCall struct { // computed `Policy` across multiple resources. 
Subtrees of Resource // Manager resource hierarchy with 'under:' prefix will not be expanded. // -// - resource: The name of the resource to start computing the effective -// `Policy`. +// - resource: The name of the resource to start computing the effective +// `Policy`. func (r *ProjectsService) GetEffectiveOrgPolicy(resource string, geteffectiveorgpolicyrequest *GetEffectiveOrgPolicyRequest) *ProjectsGetEffectiveOrgPolicyCall { c := &ProjectsGetEffectiveOrgPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6578,17 +6584,17 @@ func (c *ProjectsGetEffectiveOrgPolicyCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -6652,10 +6658,10 @@ type ProjectsGetIamPolicyCall struct { // structure and identification, see Resource Names // (https://cloud.google.com/apis/design/resource_names). // -// - resource: REQUIRED: The resource for which the policy is being -// requested. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy is being +// requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. func (r *ProjectsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsGetIamPolicyCall { c := &ProjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6730,17 +6736,17 @@ func (c *ProjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -6876,17 +6882,17 @@ func (c *ProjectsGetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -7079,17 +7085,17 @@ func (c *ProjectsListCall) Do(opts ...googleapi.CallOption) (*ListProjectsRespon if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListProjectsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -7250,17 +7256,17 @@ func (c *ProjectsListAvailableOrgPolicyConstraintsCall) Do(opts 
...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListAvailableOrgPolicyConstraintsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -7415,17 +7421,17 @@ func (c *ProjectsListOrgPoliciesCall) Do(opts ...googleapi.CallOption) (*ListOrg if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListOrgPoliciesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -7539,10 +7545,10 @@ type ProjectsSetIamPolicyCall struct { // organization inaccessible. Authorization requires the Google IAM // permission `resourcemanager.projects.setIamPolicy` on the project // -// - resource: REQUIRED: The resource for which the policy is being -// specified. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy is being +// specified. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. func (r *ProjectsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsSetIamPolicyCall { c := &ProjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7617,17 +7623,17 @@ func (c *ProjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -7761,17 +7767,17 @@ func (c *ProjectsSetOrgPolicyCall) Do(opts ...googleapi.CallOption) (*OrgPolicy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OrgPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -7832,10 +7838,10 @@ type ProjectsTestIamPermissionsCall struct { // (https://cloud.google.com/apis/design/resource_names). There are no // permissions required for making this API call. // -// - resource: REQUIRED: The resource for which the policy detail is -// being requested. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy detail is +// being requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. 
func (r *ProjectsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsTestIamPermissionsCall { c := &ProjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -7910,17 +7916,17 @@ func (c *ProjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -8056,17 +8062,17 @@ func (c *ProjectsUndeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -8199,17 +8205,17 @@ func (c *ProjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Project, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Project{ ServerResponse: googleapi.ServerResponse{ diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 4f6aa6b80..b380f1a70 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -549,6 +549,56 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "setLabels": { + "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.addresses.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -662,7 +712,7 @@ ] }, "get": { - "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request.", + "description": "Returns the specified autoscaler resource.", "flatPath": "projects/{project}/zones/{zone}/autoscalers/{autoscaler}", "httpMethod": "GET", "id": "compute.autoscalers.get", @@ -1035,7 +1085,7 @@ ] }, "get": { - "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request.", + "description": "Returns the specified BackendBucket resource.", "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}", "httpMethod": "GET", "id": "compute.backendBuckets.get", @@ -1473,7 +1523,7 @@ ] }, "get": { - "description": "Returns the specified BackendService resource. Gets a list of available backend services.", + "description": "Returns the specified BackendService resource.", "flatPath": "projects/{project}/global/backendServices/{backendService}", "httpMethod": "GET", "id": "compute.backendServices.get", @@ -1544,6 +1594,47 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/global/backendServices/{resource}/getIamPolicy", + "httpMethod": "GET", + "id": "compute.backendServices.getIamPolicy", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/backendServices/{resource}/getIamPolicy", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "description": "Creates a BackendService resource in the specified project using the data included in the request. 
For more information, see Backend services overview .", "flatPath": "projects/{project}/global/backendServices", @@ -1716,6 +1807,43 @@ "https://www.googleapis.com/auth/compute" ] }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "flatPath": "projects/{project}/global/backendServices/{resource}/setIamPolicy", + "httpMethod": "POST", + "id": "compute.backendServices.setIamPolicy", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/backendServices/{resource}/setIamPolicy", + "request": { + "$ref": "GlobalSetPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setSecurityPolicy": { "description": "Sets the Google Cloud Armor security policy for the specified backend service. For more information, see Google Cloud Armor Overview", "flatPath": "projects/{project}/global/backendServices/{backendService}/setSecurityPolicy", @@ -1864,7 +1992,7 @@ ] }, "get": { - "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request.", + "description": "Returns the specified disk type.", "flatPath": "projects/{project}/zones/{zone}/diskTypes/{diskType}", "httpMethod": "GET", "id": "compute.diskTypes.get", @@ -2185,7 +2313,7 @@ ] }, "get": { - "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.", + "description": "Returns the specified persistent disk.", "flatPath": "projects/{project}/zones/{zone}/disks/{disk}", "httpMethod": "GET", "id": "compute.disks.get", @@ -2626,6 +2754,67 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "update": { + "description": "Updates the specified disk with the data included in the request. The update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: user_license.", + "flatPath": "projects/{project}/zones/{zone}/disks/{disk}", + "httpMethod": "PATCH", + "id": "compute.disks.update", + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "parameters": { + "disk": { + "description": "The disk name for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "paths": { + "location": "query", + "repeated": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "updateMask": { + "description": "update_mask indicates fields to be updated as part of this request.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/disks/{disk}", + "request": { + "$ref": "Disk" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -3197,7 +3386,7 @@ "type": "string" }, "parentId": { - "description": "Parent ID for this request.", + "description": "Parent ID for this request. The ID can be either be \"folders/[FOLDER_ID]\" if the parent is a folder or \"organizations/[ORGANIZATION_ID]\" if the parent is an organization.", "location": "query", "type": "string" }, @@ -3256,7 +3445,7 @@ "type": "string" }, "parentId": { - "description": "The new parent of the firewall policy.", + "description": "The new parent of the firewall policy. The ID can be either be \"folders/[FOLDER_ID]\" if the parent is a folder or \"organizations/[ORGANIZATION_ID]\" if the parent is an organization.", "location": "query", "type": "string" }, @@ -4185,7 +4374,7 @@ ] }, "get": { - "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request.", + "description": "Returns the specified address resource.", "flatPath": "projects/{project}/global/addresses/{address}", "httpMethod": "GET", "id": "compute.globalAddresses.get", @@ -4307,6 +4496,43 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "setLabels": { + "description": "Sets the labels on a GlobalAddress. 
To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/global/addresses/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.globalAddresses.setLabels", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/addresses/{resource}/setLabels", + "request": { + "$ref": "GlobalSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -4721,7 +4947,7 @@ ] }, "get": { - "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + "description": "Returns the specified network endpoint group.", "flatPath": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}", "httpMethod": "GET", "id": "compute.globalNetworkEndpointGroups.get", @@ -5552,7 +5778,7 @@ ] }, "get": { - "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + "description": "Returns the specified HealthCheck resource.", "flatPath": "projects/{project}/global/healthChecks/{healthCheck}", "httpMethod": "GET", "id": "compute.healthChecks.get", @@ -5803,7 +6029,7 @@ ] }, "get": { - "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.", + "description": "Returns the specified HttpHealthCheck resource.", "flatPath": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", "httpMethod": "GET", "id": "compute.httpHealthChecks.get", @@ -6054,7 +6280,7 @@ ] }, "get": { - "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request.", + "description": "Returns the specified HttpsHealthCheck resource.", "flatPath": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", "httpMethod": "GET", "id": "compute.httpsHealthChecks.get", @@ -6394,7 +6620,7 @@ ] }, "get": { - "description": "Returns the specified image. Gets a list of available images by making a list() request.", + "description": "Returns the specified image.", "flatPath": "projects/{project}/global/images/{image}", "httpMethod": "GET", "id": "compute.images.get", @@ -6429,7 +6655,7 @@ ] }, "getFromFamily": { - "description": "Returns the latest image that is part of an image family and is not deprecated.", + "description": "Returns the latest image that is part of an image family and is not deprecated. For more information on image families, see Public image families documentation.", "flatPath": "projects/{project}/global/images/family/{family}", "httpMethod": "GET", "id": "compute.images.getFromFamily", @@ -6446,7 +6672,7 @@ "type": "string" }, "project": { - "description": "Project ID for this request.", + "description": "The image project that the image belongs to. 
For example, to get a CentOS image, specify centos-cloud as the image project.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, @@ -7095,7 +7321,7 @@ ] }, "get": { - "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", + "description": "Returns all of the details about the specified managed instance group.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", "httpMethod": "GET", "id": "compute.instanceGroupManagers.get", @@ -8259,6 +8485,66 @@ }, "instanceTemplates": { "methods": { + "aggregatedList": { + "description": "Retrieves the list of all InstanceTemplates resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/instanceTemplates", + "httpMethod": "GET", + "id": "compute.instanceTemplates.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/instanceTemplates", + "response": { + "$ref": "InstanceTemplateAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "delete": { "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group.", "flatPath": "projects/{project}/global/instanceTemplates/{instanceTemplate}", @@ -8299,7 +8585,7 @@ ] }, "get": { - "description": "Returns the specified instance template. 
Gets a list of available instance templates by making a list() request.", + "description": "Returns the specified instance template.", "flatPath": "projects/{project}/global/instanceTemplates/{instanceTemplate}", "httpMethod": "GET", "id": "compute.instanceTemplates.get", @@ -8969,7 +9255,7 @@ ] }, "get": { - "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request.", + "description": "Returns the specified Instance resource.", "flatPath": "projects/{project}/zones/{zone}/instances/{instance}", "httpMethod": "GET", "id": "compute.instances.get", @@ -10086,6 +10372,56 @@ "https://www.googleapis.com/auth/compute" ] }, + "setName": { + "description": "Sets name of an instance.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setName", + "httpMethod": "POST", + "id": "compute.instances.setName", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "The instance name for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/instances/{instance}/setName", + "request": { + "$ref": "InstancesSetNameRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setScheduling": { "description": "Sets an instance's scheduling options. You can only call this method on a stopped instance, that is, a VM instance that is in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. For more information about setting scheduling options for a VM, see Set VM host maintenance policy.", "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setScheduling", @@ -10436,6 +10772,11 @@ "instance" ], "parameters": { + "discardLocalSsd": { + "description": "If true, discard the contents of any attached localSSD partitions. 
Default value is false.", + "location": "query", + "type": "boolean" + }, "instance": { "description": "Name of the instance resource to stop.", "location": "path", @@ -10483,6 +10824,11 @@ "instance" ], "parameters": { + "discardLocalSsd": { + "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "location": "query", + "type": "boolean" + }, "instance": { "description": "Name of the instance resource to suspend.", "location": "path", @@ -11176,136 +11522,49 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "interconnectLocations": { - "methods": { - "get": { - "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request.", - "flatPath": "projects/{project}/global/interconnectLocations/{interconnectLocation}", - "httpMethod": "GET", - "id": "compute.interconnectLocations.get", + }, + "setLabels": { + "description": "Sets the labels on an InterconnectAttachment. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.interconnectAttachments.setLabels", "parameterOrder": [ "project", - "interconnectLocation" + "region", + "resource" ], "parameters": { - "interconnectLocation": { - "description": "Name of the interconnect location to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" - } - }, - "path": "projects/{project}/global/interconnectLocations/{interconnectLocation}", - "response": { - "$ref": "InterconnectLocation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "list": { - "description": "Retrieves the list of interconnect locations available to the specified project.", - "flatPath": "projects/{project}/global/interconnectLocations", - "httpMethod": "GET", - "id": "compute.interconnectLocations.list", - "parameterOrder": [ - "project" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" }, - "project": { - "description": "Project ID for this request.", + "region": { + "description": "The region for this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", - "type": "boolean" - } - }, - "path": "projects/{project}/global/interconnectLocations", - "response": { - "$ref": "InterconnectLocationList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "interconnects": { - "methods": { - "delete": { - "description": "Deletes the specified interconnect.", - "flatPath": "projects/{project}/global/interconnects/{interconnect}", - "httpMethod": "DELETE", - "id": "compute.interconnects.delete", - "parameterOrder": [ - "project", - "interconnect" - ], - "parameters": { - "interconnect": { - "description": "Name of the interconnect to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, "type": "string" }, - "project": { - "description": "Project ID for this request.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" } }, - "path": "projects/{project}/global/interconnects/{interconnect}", + "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, "response": { "$ref": "Operation" }, @@ -11313,54 +11572,23 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, + } + } + }, + "interconnectLocations": { + "methods": { "get": { - "description": "Returns the specified interconnect. 
Get a list of available interconnects by making a list() request.", - "flatPath": "projects/{project}/global/interconnects/{interconnect}", - "httpMethod": "GET", - "id": "compute.interconnects.get", - "parameterOrder": [ - "project", - "interconnect" - ], - "parameters": { - "interconnect": { - "description": "Name of the interconnect to return.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/global/interconnects/{interconnect}", - "response": { - "$ref": "Interconnect" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getDiagnostics": { - "description": "Returns the interconnectDiagnostics for the specified interconnect.", - "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", + "description": "Returns the details for the specified interconnect location. Gets a list of available interconnect locations by making a list() request.", + "flatPath": "projects/{project}/global/interconnectLocations/{interconnectLocation}", "httpMethod": "GET", - "id": "compute.interconnects.getDiagnostics", + "id": "compute.interconnectLocations.get", "parameterOrder": [ "project", - "interconnect" + "interconnectLocation" ], "parameters": { - "interconnect": { - "description": "Name of the interconnect resource to query.", + "interconnectLocation": { + "description": "Name of the interconnect location to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -11374,9 +11602,9 @@ "type": "string" } }, - "path": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", + "path": "projects/{project}/global/interconnectLocations/{interconnectLocation}", "response": { - "$ref": "InterconnectsGetDiagnosticsResponse" + "$ref": "InterconnectLocation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -11384,45 +11612,213 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Creates a Interconnect in the specified project using the data included in the request.", - "flatPath": "projects/{project}/global/interconnects", - "httpMethod": "POST", - "id": "compute.interconnects.insert", - "parameterOrder": [ - "project" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/global/interconnects", - "request": { - "$ref": "Interconnect" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, "list": { - "description": "Retrieves the list of interconnect available to the specified project.", - "flatPath": "projects/{project}/global/interconnects", + "description": "Retrieves the list of interconnect locations available to the specified project.", + "flatPath": "projects/{project}/global/interconnectLocations", "httpMethod": "GET", - "id": "compute.interconnects.list", + "id": "compute.interconnectLocations.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/global/interconnectLocations", + "response": { + "$ref": "InterconnectLocationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "interconnects": { + "methods": { + "delete": { + "description": "Deletes the specified Interconnect.", + "flatPath": "projects/{project}/global/interconnects/{interconnect}", + "httpMethod": "DELETE", + "id": "compute.interconnects.delete", + "parameterOrder": [ + "project", + "interconnect" + ], + "parameters": { + "interconnect": { + "description": "Name of the interconnect to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/interconnects/{interconnect}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified Interconnect. Get a list of available Interconnects by making a list() request.", + "flatPath": "projects/{project}/global/interconnects/{interconnect}", + "httpMethod": "GET", + "id": "compute.interconnects.get", + "parameterOrder": [ + "project", + "interconnect" + ], + "parameters": { + "interconnect": { + "description": "Name of the interconnect to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/interconnects/{interconnect}", + "response": { + "$ref": "Interconnect" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getDiagnostics": { + "description": "Returns the interconnectDiagnostics for the specified Interconnect.", + "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", + "httpMethod": "GET", + "id": "compute.interconnects.getDiagnostics", + "parameterOrder": [ + "project", + "interconnect" + ], + "parameters": { + "interconnect": { + "description": "Name of the interconnect resource to query.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", + "response": { + "$ref": "InterconnectsGetDiagnosticsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates an Interconnect in the specified project using the data included in the request.", + "flatPath": "projects/{project}/global/interconnects", + "httpMethod": "POST", + "id": "compute.interconnects.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/interconnects", + "request": { + "$ref": "Interconnect" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of Interconnects available to the specified project.", + "flatPath": "projects/{project}/global/interconnects", + "httpMethod": "GET", + "id": "compute.interconnects.list", "parameterOrder": [ "project" ], @@ -11474,7 +11870,7 @@ ] }, "patch": { - "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates the specified Interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "flatPath": "projects/{project}/global/interconnects/{interconnect}", "httpMethod": "PATCH", "id": "compute.interconnects.patch", @@ -11514,6 +11910,43 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "setLabels": { + "description": "Sets the labels on an Interconnect. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/global/interconnects/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.interconnects.setLabels", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/interconnects/{resource}/setLabels", + "request": { + "$ref": "GlobalSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -11922,7 +12355,7 @@ ] }, "get": { - "description": "Returns the specified machine image. Gets a list of available machine images by making a list() request.", + "description": "Returns the specified machine image.", "flatPath": "projects/{project}/global/machineImages/{machineImage}", "httpMethod": "GET", "id": "compute.machineImages.get", @@ -12231,7 +12664,7 @@ ] }, "get": { - "description": "Returns the specified machine type. 
Gets a list of available machine types by making a list() request.", + "description": "Returns the specified machine type.", "flatPath": "projects/{project}/zones/{zone}/machineTypes/{machineType}", "httpMethod": "GET", "id": "compute.machineTypes.get", @@ -12338,13 +12771,13 @@ } } }, - "networkEdgeSecurityServices": { + "networkAttachments": { "methods": { "aggregatedList": { - "description": "Retrieves the list of all NetworkEdgeSecurityService resources available to the specified project.", - "flatPath": "projects/{project}/aggregated/networkEdgeSecurityServices", + "description": "Retrieves the list of all NetworkAttachment resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/networkAttachments", "httpMethod": "GET", - "id": "compute.networkEdgeSecurityServices.aggregatedList", + "id": "compute.networkAttachments.aggregatedList", "parameterOrder": [ "project" ], @@ -12378,7 +12811,7 @@ "type": "string" }, "project": { - "description": "Name of the project scoping this request.", + "description": "Project ID for this request.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, @@ -12390,9 +12823,9 @@ "type": "boolean" } }, - "path": "projects/{project}/aggregated/networkEdgeSecurityServices", + "path": "projects/{project}/aggregated/networkAttachments", "response": { - "$ref": "NetworkEdgeSecurityServiceAggregatedList" + "$ref": "NetworkAttachmentAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -12401,18 +12834,18 @@ ] }, "delete": { - "description": "Deletes the specified service.", - "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "description": "Deletes the specified NetworkAttachment in the given scope", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", "httpMethod": "DELETE", - "id": "compute.networkEdgeSecurityServices.delete", + "id": "compute.networkAttachments.delete", "parameterOrder": [ "project", "region", - "networkEdgeSecurityService" + "networkAttachment" ], "parameters": { - "networkEdgeSecurityService": { - "description": "Name of the network edge security service to delete.", + "networkAttachment": { + "description": "Name of the NetworkAttachment resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -12426,19 +12859,19 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", "response": { "$ref": "Operation" }, @@ -12448,18 +12881,18 @@ ] }, "get": { - "description": "Gets a specified NetworkEdgeSecurityService.", - "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "description": "Returns the specified NetworkAttachment resource in the given scope.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", "httpMethod": "GET", - "id": "compute.networkEdgeSecurityServices.get", + "id": "compute.networkAttachments.get", "parameterOrder": [ "project", "region", - "networkEdgeSecurityService" + "networkAttachment" ], "parameters": { - "networkEdgeSecurityService": { - "description": "Name of the network edge security service to get.", + "networkAttachment": { + "description": "Name of the NetworkAttachment resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -12473,16 +12906,16 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", "response": { - "$ref": "NetworkEdgeSecurityService" + "$ref": "NetworkAttachment" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -12490,16 +12923,23 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "description": "Creates a new service in the specified project using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices", - "httpMethod": "POST", - "id": "compute.networkEdgeSecurityServices.insert", + "getIamPolicy": { + "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", + "httpMethod": "GET", + "id": "compute.networkAttachments.getIamPolicy", "parameterOrder": [ "project", - "region" + "region", + "resource" ], "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, "project": { "description": "Project ID for this request.", "location": "path", @@ -12508,58 +12948,40 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, "type": "string" - }, - "validateOnly": { - "description": "If true, the request will not be committed.", - "location": "query", - "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices", - "request": { - "$ref": "NetworkEdgeSecurityService" - }, + "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", "response": { - "$ref": "Operation" + "$ref": "Policy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified policy with the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", - "httpMethod": "PATCH", - "id": "compute.networkEdgeSecurityServices.patch", + "insert": { + "description": "Creates a NetworkAttachment in the specified project in the given scope using the parameters that are included in the request.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments", + "httpMethod": "POST", + "id": "compute.networkAttachments.insert", "parameterOrder": [ "project", - "region", - "networkEdgeSecurityService" + "region" ], "parameters": { - "networkEdgeSecurityService": { - "description": "Name of the network edge security service to update.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - }, - "paths": { - "location": "query", - "repeated": true, - "type": "string" - }, "project": { "description": "Project ID for this request.", "location": "path", @@ -12568,27 +12990,21 @@ "type": "string" }, "region": { - 
"description": "Name of the region scoping this request.", + "description": "Name of the region of this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "updateMask": { - "description": "Indicates fields to be updated as part of this request.", - "format": "google-fieldmask", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "path": "projects/{project}/regions/{region}/networkAttachments", "request": { - "$ref": "NetworkEdgeSecurityService" + "$ref": "NetworkAttachment" }, "response": { "$ref": "Operation" @@ -12597,18 +13013,15 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "networkEndpointGroups": { - "methods": { - "aggregatedList": { - "description": "Retrieves the list of network endpoint groups and sorts them by zone.", - "flatPath": "projects/{project}/aggregated/networkEndpointGroups", + }, + "list": { + "description": "Lists the NetworkAttachments for a project in the given scope.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments", "httpMethod": "GET", - "id": "compute.networkEndpointGroups.aggregatedList", + "id": "compute.networkAttachments.list", "parameterOrder": [ - "project" + "project", + "region" ], "parameters": { "filter": { @@ -12616,11 +13029,430 @@ "location": "query", "type": "string" }, - "includeAllScopes": { - "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", - "location": "query", - "type": "boolean" - }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region of this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/networkAttachments", + "response": { + "$ref": "NetworkAttachmentList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", + "httpMethod": "POST", + "id": "compute.networkAttachments.setIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", + "request": { + "$ref": "RegionSetPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", + "httpMethod": "POST", + "id": "compute.networkAttachments.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "networkEdgeSecurityServices": { + "methods": { + "aggregatedList": { + "description": "Retrieves the list of all NetworkEdgeSecurityService resources available to the specified project.", + "flatPath": "projects/{project}/aggregated/networkEdgeSecurityServices", + "httpMethod": "GET", + "id": "compute.networkEdgeSecurityServices.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. 
Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/networkEdgeSecurityServices", + "response": { + "$ref": "NetworkEdgeSecurityServiceAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified service.", + "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "httpMethod": "DELETE", + "id": "compute.networkEdgeSecurityServices.delete", + "parameterOrder": [ + "project", + "region", + "networkEdgeSecurityService" + ], + "parameters": { + "networkEdgeSecurityService": { + "description": "Name of the network edge security service to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Gets a specified NetworkEdgeSecurityService.", + "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "httpMethod": "GET", + "id": "compute.networkEdgeSecurityServices.get", + "parameterOrder": [ + "project", + "region", + "networkEdgeSecurityService" + ], + "parameters": { + "networkEdgeSecurityService": { + "description": "Name of the network edge security service to get.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "response": { + "$ref": "NetworkEdgeSecurityService" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a new service in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + "httpMethod": "POST", + "id": "compute.networkEdgeSecurityServices.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "validateOnly": { + "description": "If true, the request will not be committed.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + "request": { + "$ref": "NetworkEdgeSecurityService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "patch": { + "description": "Patches the specified policy with the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "httpMethod": "PATCH", + "id": "compute.networkEdgeSecurityServices.patch", + "parameterOrder": [ + "project", + "region", + "networkEdgeSecurityService" + ], + "parameters": { + "networkEdgeSecurityService": { + "description": "Name of the network edge security service to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "paths": { + "location": "query", + "repeated": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "updateMask": { + "description": "Indicates fields to be updated as part of this request.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + "request": { + "$ref": "NetworkEdgeSecurityService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "networkEndpointGroups": { + "methods": { + "aggregatedList": { + "description": "Retrieves the list of network endpoint groups and sorts them by zone.", + "flatPath": "projects/{project}/aggregated/networkEndpointGroups", + "httpMethod": "GET", + "id": "compute.networkEndpointGroups.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, "maxResults": { "default": "500", "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -12804,7 +13636,7 @@ ] }, "get": { - "description": "Returns the specified network endpoint group. 
Gets a list of available network endpoint groups by making a list() request.", + "description": "Returns the specified network endpoint group.", "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", "httpMethod": "GET", "id": "compute.networkEndpointGroups.get", @@ -13839,7 +14671,7 @@ ] }, "get": { - "description": "Returns the specified network. Gets a list of available networks by making a list() request.", + "description": "Returns the specified network.", "flatPath": "projects/{project}/global/networks/{network}", "httpMethod": "GET", "id": "compute.networks.get", @@ -15038,7 +15870,7 @@ ] }, "get": { - "description": "Returns the specified node template. Gets a list of available node templates by making a list() request.", + "description": "Returns the specified node template.", "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", "httpMethod": "GET", "id": "compute.nodeTemplates.get", @@ -15390,7 +16222,7 @@ ] }, "get": { - "description": "Returns the specified node type. Gets a list of available node types by making a list() request.", + "description": "Returns the specified node type.", "flatPath": "projects/{project}/zones/{zone}/nodeTypes/{nodeType}", "httpMethod": "GET", "id": "compute.nodeTypes.get", @@ -17273,6 +18105,55 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", + "httpMethod": "GET", + "id": "compute.regionBackendServices.getIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "description": "Creates a regional BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", "flatPath": "projects/{project}/regions/{region}/backendServices", @@ -17428,6 +18309,51 @@ "https://www.googleapis.com/auth/compute" ] }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", + "httpMethod": "POST", + "id": "compute.regionBackendServices.setIamPolicy", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", + "request": { + "$ref": "RegionSetPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "update": { "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Backend services overview .", "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", @@ -17543,7 +18469,7 @@ ] }, "get": { - "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", + "description": "Returns the specified commitment resource.", "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", "httpMethod": "GET", "id": "compute.regionCommitments.get", @@ -17756,7 +18682,7 @@ "regionDiskTypes": { "methods": { "get": { - "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request.", + "description": "Returns the specified regional disk type.", "flatPath": "projects/{project}/regions/{region}/diskTypes/{diskType}", "httpMethod": "GET", "id": "compute.regionDiskTypes.get", @@ -18453,109 +19379,30 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "regionHealthCheckServices": { - "methods": { - "delete": { - "description": "Deletes the specified regional HealthCheckService.", - "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - "httpMethod": "DELETE", - "id": "compute.regionHealthCheckServices.delete", - "parameterOrder": [ - "project", - "region", - "healthCheckService" - ], - "parameters": { - "healthCheckService": { - "description": "Name of the HealthCheckService to delete. 
The name must be 1-63 characters long, and comply with RFC1035.", - "location": "path", - "required": true, - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] }, - "get": { - "description": "Returns the specified regional HealthCheckService resource.", - "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - "httpMethod": "GET", - "id": "compute.regionHealthCheckServices.get", + "update": { + "description": "Update the specified disk with the data included in the request. Update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: user_license.", + "flatPath": "projects/{project}/regions/{region}/disks/{disk}", + "httpMethod": "PATCH", + "id": "compute.regionDisks.update", "parameterOrder": [ "project", "region", - "healthCheckService" + "disk" ], "parameters": { - "healthCheckService": { - "description": "Name of the HealthCheckService to update. 
The name must be 1-63 characters long, and comply with RFC1035.", + "disk": { + "description": "The disk name for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, + "paths": { + "location": "query", + "repeated": true, "type": "string" }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - "response": { - "$ref": "HealthCheckService" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/healthCheckServices", - "httpMethod": "POST", - "id": "compute.regionHealthCheckServices.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { "project": { "description": "Project ID for this request.", "location": "path", @@ -18564,7 +19411,7 @@ "type": "string" }, "region": { - "description": "Name of the region scoping this request.", + "description": "The name of the region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -18574,11 +19421,17 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "updateMask": { + "description": "update_mask indicates fields to be updated as part of this request.", + "format": "google-fieldmask", + "location": "query", + "type": "string" } }, - "path": "projects/{project}/regions/{region}/healthCheckServices", + "path": "projects/{project}/regions/{region}/disks/{disk}", "request": { - "$ref": "HealthCheckService" + "$ref": "Disk" }, "response": { "$ref": "Operation" @@ -18587,75 +19440,16 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "list": { - "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", - "flatPath": "projects/{project}/regions/{region}/healthCheckServices", - "httpMethod": "GET", - "id": "compute.regionHealthCheckServices.list", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - "location": "query", - "type": "boolean" - } - }, - "path": "projects/{project}/regions/{region}/healthCheckServices", - "response": { - "$ref": "HealthCheckServicesList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "description": "Updates the specified regional HealthCheckService resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + } + } + }, + "regionHealthCheckServices": { + "methods": { + "delete": { + "description": "Deletes the specified regional HealthCheckService.", "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - "httpMethod": "PATCH", - "id": "compute.regionHealthCheckServices.patch", + "httpMethod": "DELETE", + "id": "compute.regionHealthCheckServices.delete", "parameterOrder": [ "project", "region", @@ -18663,7 +19457,200 @@ ], "parameters": { "healthCheckService": { - "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", + "description": "Name of the HealthCheckService to delete. 
The name must be 1-63 characters long, and comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified regional HealthCheckService resource.", + "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "httpMethod": "GET", + "id": "compute.regionHealthCheckServices.get", + "parameterOrder": [ + "project", + "region", + "healthCheckService" + ], + "parameters": { + "healthCheckService": { + "description": "Name of the HealthCheckService to update. 
The name must be 1-63 characters long, and comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "response": { + "$ref": "HealthCheckService" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/healthCheckServices", + "httpMethod": "POST", + "id": "compute.regionHealthCheckServices.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/healthCheckServices", + "request": { + "$ref": "HealthCheckService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", + "flatPath": "projects/{project}/regions/{region}/healthCheckServices", + "httpMethod": "GET", + "id": "compute.regionHealthCheckServices.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/healthCheckServices", + "response": { + "$ref": "HealthCheckServicesList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Updates the specified regional HealthCheckService resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "httpMethod": "PATCH", + "id": "compute.regionHealthCheckServices.patch", + "parameterOrder": [ + "project", + "region", + "healthCheckService" + ], + "parameters": { + "healthCheckService": { + "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", "required": true, "type": "string" @@ -18752,7 +19739,7 @@ ] }, "get": { - "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + "description": "Returns the specified HealthCheck resource.", "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", "httpMethod": "GET", "id": "compute.regionHealthChecks.get", @@ -20200,68 +21187,23 @@ } } }, - "regionInstances": { - "methods": { - "bulkInsert": { - "description": "Creates multiple instances in a given region. Count specifies the number of instances to create.", - "flatPath": "projects/{project}/regions/{region}/instances/bulkInsert", - "httpMethod": "POST", - "id": "compute.regionInstances.bulkInsert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/instances/bulkInsert", - "request": { - "$ref": "BulkInsertInstanceResource" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "regionNetworkEndpointGroups": { + "regionInstanceTemplates": { "methods": { "delete": { - "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", - "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone.", + "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", "httpMethod": "DELETE", - "id": "compute.regionNetworkEndpointGroups.delete", + "id": "compute.regionInstanceTemplates.delete", "parameterOrder": [ "project", "region", - "networkEndpointGroup" + "instanceTemplate" ], "parameters": { - "networkEndpointGroup": { - "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + "instanceTemplate": { + "description": "The name of the instance template to delete.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -20273,8 +21215,9 @@ "type": "string" }, "region": { - "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -20284,7 +21227,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", "response": { "$ref": "Operation" }, @@ -20294,19 +21237,20 @@ ] }, "get": { - "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "description": "Returns the specified instance template.", + "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", "httpMethod": "GET", - "id": "compute.regionNetworkEndpointGroups.get", + "id": "compute.regionInstanceTemplates.get", "parameterOrder": [ "project", "region", - "networkEndpointGroup" + "instanceTemplate" ], "parameters": { - "networkEndpointGroup": { - "description": "The name of the network endpoint group. 
It should comply with RFC1035.", + "instanceTemplate": { + "description": "The name of the instance template.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -20318,15 +21262,16 @@ "type": "string" }, "region": { - "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", "response": { - "$ref": "NetworkEndpointGroup" + "$ref": "InstanceTemplate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -20335,10 +21280,10 @@ ] }, "insert": { - "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", - "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", + "description": "Creates an instance template in the specified project and region using the global instance template whose URL is included in the request.", + "flatPath": "projects/{project}/regions/{region}/instanceTemplates", "httpMethod": "POST", - "id": "compute.regionNetworkEndpointGroups.insert", + "id": "compute.regionInstanceTemplates.insert", "parameterOrder": [ "project", "region" @@ -20352,8 +21297,9 @@ "type": "string" }, "region": { - "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -20363,9 +21309,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEndpointGroups", + "path": "projects/{project}/regions/{region}/instanceTemplates", "request": { - "$ref": "NetworkEndpointGroup" + "$ref": "InstanceTemplate" }, "response": { "$ref": "Operation" @@ -20376,10 +21322,250 @@ ] }, "list": { - "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", - "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", + "description": "Retrieves a list of instance templates that are contained within the specified project and region.", + "flatPath": "projects/{project}/regions/{region}/instanceTemplates", "httpMethod": "GET", - "id": "compute.regionNetworkEndpointGroups.list", + "id": "compute.regionInstanceTemplates.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the regions for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/instanceTemplates", + "response": { + "$ref": "InstanceTemplateList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "regionInstances": { + "methods": { + "bulkInsert": { + "description": "Creates multiple instances in a given region. Count specifies the number of instances to create.", + "flatPath": "projects/{project}/regions/{region}/instances/bulkInsert", + "httpMethod": "POST", + "id": "compute.regionInstances.bulkInsert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/instances/bulkInsert", + "request": { + "$ref": "BulkInsertInstanceResource" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "regionNetworkEndpointGroups": { + "methods": { + "delete": { + "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "httpMethod": "DELETE", + "id": "compute.regionNetworkEndpointGroups.delete", + "parameterOrder": [ + "project", + "region", + "networkEndpointGroup" + ], + "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified network endpoint group.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "httpMethod": "GET", + "id": "compute.regionNetworkEndpointGroups.get", + "parameterOrder": [ + "project", + "region", + "networkEndpointGroup" + ], + "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "response": { + "$ref": "NetworkEndpointGroup" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", + "httpMethod": "POST", + "id": "compute.regionNetworkEndpointGroups.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEndpointGroups", + "request": { + "$ref": "NetworkEndpointGroup" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", + "httpMethod": "GET", + "id": "compute.regionNetworkEndpointGroups.list", "parameterOrder": [ "project", "region" @@ -21891,7 +23077,7 @@ ] }, "patch": { - "description": "Patches the specified policy with the data included in the request.", + "description": "Patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", "httpMethod": "PATCH", "id": "compute.regionSecurityPolicies.patch", @@ -22141,17 +23327,17 @@ } } }, - "regionTargetHttpProxies": { + "regionSslPolicies": { "methods": { "delete": { - "description": "Deletes the specified TargetHttpProxy resource.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", "httpMethod": "DELETE", - "id": "compute.regionTargetHttpProxies.delete", + "id": "compute.regionSslPolicies.delete", "parameterOrder": [ "project", "region", - "targetHttpProxy" + "sslPolicy" ], "parameters": { "project": { @@ -22173,15 +23359,14 @@ "location": "query", "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to delete.", + "sslPolicy": { + "description": "Name of the SSL policy to delete. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", "response": { "$ref": "Operation" }, @@ -22191,14 +23376,14 @@ ] }, "get": { - "description": "Returns the specified TargetHttpProxy resource in the specified region. 
Gets a list of available target HTTP proxies by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "description": "Lists all of the ordered rules present in a single specified policy.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", "httpMethod": "GET", - "id": "compute.regionTargetHttpProxies.get", + "id": "compute.regionSslPolicies.get", "parameterOrder": [ "project", "region", - "targetHttpProxy" + "sslPolicy" ], "parameters": { "project": { @@ -22215,17 +23400,16 @@ "required": true, "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy resource to return.", + "sslPolicy": { + "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", + "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", "response": { - "$ref": "TargetHttpProxy" + "$ref": "SslPolicy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22234,10 +23418,10 @@ ] }, "insert": { - "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", + "description": "Creates a new policy in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies", "httpMethod": "POST", - "id": "compute.regionTargetHttpProxies.insert", + "id": "compute.regionSslPolicies.insert", "parameterOrder": [ "project", "region" @@ -22263,9 +23447,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies", + "path": "projects/{project}/regions/{region}/sslPolicies", "request": { - "$ref": "TargetHttpProxy" + "$ref": "SslPolicy" }, "response": { "$ref": "Operation" @@ -22276,10 +23460,10 @@ ] }, "list": { - "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", + "description": "Lists all the SSL policies that have been configured for the specified project and region.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies", "httpMethod": "GET", - "id": "compute.regionTargetHttpProxies.list", + "id": "compute.regionSslPolicies.list", "parameterOrder": [ "project", "region" @@ -22328,9 +23512,9 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies", + "path": "projects/{project}/regions/{region}/sslPolicies", "response": { - "$ref": "TargetHttpProxyList" + "$ref": "SslPoliciesList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22338,15 +23522,78 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setUrlMap": { - "description": "Changes the URL map for TargetHttpProxy.", - "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", - "httpMethod": "POST", - "id": "compute.regionTargetHttpProxies.setUrlMap", + "listAvailableFeatures": { + "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures", + 
"httpMethod": "GET", + "id": "compute.regionSslPolicies.listAvailableFeatures", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. 
Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures", + "response": { + "$ref": "SslPoliciesListAvailableFeaturesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patches the specified SSL policy with the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", + "httpMethod": "PATCH", + "id": "compute.regionSslPolicies.patch", "parameterOrder": [ "project", "region", - "targetHttpProxy" + "sslPolicy" ], "parameters": { "project": { @@ -22368,17 +23615,16 @@ "location": "query", "type": "string" }, - "targetHttpProxy": { - "description": "Name of the TargetHttpProxy to set a URL map for.", + "sslPolicy": { + "description": "Name of the SSL policy to update. 
The name must be 1-63 characters long, and comply with RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", "request": { - "$ref": "UrlMapReference" + "$ref": "SslPolicy" }, "response": { "$ref": "Operation" @@ -22390,17 +23636,17 @@ } } }, - "regionTargetHttpsProxies": { + "regionTargetHttpProxies": { "methods": { "delete": { - "description": "Deletes the specified TargetHttpsProxy resource.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "description": "Deletes the specified TargetHttpProxy resource.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", "httpMethod": "DELETE", - "id": "compute.regionTargetHttpsProxies.delete", + "id": "compute.regionTargetHttpProxies.delete", "parameterOrder": [ "project", "region", - "targetHttpsProxy" + "targetHttpProxy" ], "parameters": { "project": { @@ -22422,15 +23668,15 @@ "location": "query", "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to delete.", + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", "response": { "$ref": "Operation" }, @@ -22440,14 +23686,14 @@ ] }, "get": { - "description": "Returns the specified TargetHttpsProxy resource in the specified region. 
Gets a list of available target HTTP proxies by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "description": "Returns the specified TargetHttpProxy resource in the specified region.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", "httpMethod": "GET", - "id": "compute.regionTargetHttpsProxies.get", + "id": "compute.regionTargetHttpProxies.get", "parameterOrder": [ "project", "region", - "targetHttpsProxy" + "targetHttpProxy" ], "parameters": { "project": { @@ -22464,17 +23710,17 @@ "required": true, "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to return.", + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", "response": { - "$ref": "TargetHttpsProxy" + "$ref": "TargetHttpProxy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22483,10 +23729,10 @@ ] }, "insert": { - "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", + "description": "Creates a TargetHttpProxy resource in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", "httpMethod": "POST", - "id": "compute.regionTargetHttpsProxies.insert", + "id": "compute.regionTargetHttpProxies.insert", "parameterOrder": [ "project", "region" @@ -22512,9 +23758,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies", + "path": "projects/{project}/regions/{region}/targetHttpProxies", "request": { - "$ref": "TargetHttpsProxy" + "$ref": "TargetHttpProxy" }, "response": { "$ref": "Operation" @@ -22525,10 +23771,10 @@ ] }, "list": { - "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", + "description": "Retrieves the list of TargetHttpProxy resources available to the specified project in the specified region.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies", "httpMethod": "GET", - "id": "compute.regionTargetHttpsProxies.list", + "id": "compute.regionTargetHttpProxies.list", "parameterOrder": [ "project", "region" @@ -22577,9 +23823,9 @@ "type": "boolean" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies", + "path": "projects/{project}/regions/{region}/targetHttpProxies", "response": { - "$ref": "TargetHttpsProxyList" + "$ref": "TargetHttpProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22587,115 +23833,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "description": "Patches the specified regional TargetHttpsProxy resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", - "httpMethod": "PATCH", - "id": "compute.regionTargetHttpsProxies.patch", - "parameterOrder": [ - "project", - "region", - "targetHttpsProxy" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to patch.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", - "request": { - "$ref": "TargetHttpsProxy" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "setSslCertificates": { - "description": "Replaces SslCertificates for TargetHttpsProxy.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", - "httpMethod": "POST", - "id": "compute.regionTargetHttpsProxies.setSslCertificates", - "parameterOrder": [ - "project", - "region", - "targetHttpsProxy" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region scoping this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", - "request": { - "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, "setUrlMap": { - "description": "Changes the URL map for TargetHttpsProxy.", - "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + "description": "Changes the URL map for TargetHttpProxy.", + "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", "httpMethod": "POST", - "id": "compute.regionTargetHttpsProxies.setUrlMap", + "id": "compute.regionTargetHttpProxies.setUrlMap", "parameterOrder": [ "project", "region", - "targetHttpsProxy" + "targetHttpProxy" ], "parameters": { "project": { @@ -22717,15 +23863,15 @@ "location": "query", "type": "string" }, - "targetHttpsProxy": { - "description": "Name of the TargetHttpsProxy to set a URL map for.", + "targetHttpProxy": { + "description": "Name of the TargetHttpProxy to set a URL map for.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + "path": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}/setUrlMap", "request": { "$ref": "UrlMapReference" }, @@ -22739,17 +23885,17 @@ } } }, - "regionUrlMaps": { + "regionTargetHttpsProxies": { "methods": { "delete": { - "description": "Deletes the specified UrlMap resource.", - "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "description": "Deletes the specified TargetHttpsProxy resource.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", "httpMethod": "DELETE", - "id": "compute.regionUrlMaps.delete", + "id": "compute.regionTargetHttpsProxies.delete", "parameterOrder": [ "project", "region", - "urlMap" + "targetHttpsProxy" ], "parameters": { "project": { @@ -22767,19 +23913,19 @@ "type": "string" }, "requestId": { - "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" }, - "urlMap": { - "description": "Name of the UrlMap resource to delete.", + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", "response": { "$ref": "Operation" }, @@ -22789,14 +23935,14 @@ ] }, "get": { - "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "description": "Returns the specified TargetHttpsProxy resource in the specified region.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", "httpMethod": "GET", - "id": "compute.regionUrlMaps.get", + "id": "compute.regionTargetHttpsProxies.get", "parameterOrder": [ "project", "region", - "urlMap" + "targetHttpsProxy" ], "parameters": { "project": { @@ -22813,17 +23959,17 @@ "required": true, "type": "string" }, - "urlMap": { - "description": "Name of the UrlMap resource to return.", + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", "response": { - "$ref": "UrlMap" + "$ref": "TargetHttpsProxy" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22832,10 +23978,10 @@ ] }, "insert": { - "description": "Creates a UrlMap resource in the specified project using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/urlMaps", + "description": "Creates a TargetHttpsProxy resource in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", "httpMethod": "POST", - "id": "compute.regionUrlMaps.insert", + "id": "compute.regionTargetHttpsProxies.insert", "parameterOrder": [ "project", "region" @@ -22856,14 +24002,14 @@ "type": "string" }, "requestId": { - "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" } }, - "path": "projects/{project}/regions/{region}/urlMaps", + "path": "projects/{project}/regions/{region}/targetHttpsProxies", "request": { - "$ref": "UrlMap" + "$ref": "TargetHttpsProxy" }, "response": { "$ref": "Operation" @@ -22874,10 +24020,558 @@ ] }, "list": { - "description": "Retrieves the list of UrlMap resources available to the specified project in the specified region.", - "flatPath": "projects/{project}/regions/{region}/urlMaps", + "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project in the specified region.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies", "httpMethod": "GET", - "id": "compute.regionUrlMaps.list", + "id": "compute.regionTargetHttpsProxies.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies", + "response": { + "$ref": "TargetHttpsProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patches the specified regional TargetHttpsProxy resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "PATCH", + "id": "compute.regionTargetHttpsProxies.patch", + "parameterOrder": [ + "project", + "region", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", + "request": { + "$ref": "TargetHttpsProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setSslCertificates": { + "description": "Replaces SslCertificates for TargetHttpsProxy.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "httpMethod": "POST", + "id": "compute.regionTargetHttpsProxies.setSslCertificates", + "parameterOrder": [ + "project", + "region", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "request": { + "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setUrlMap": { + "description": "Changes the URL map for TargetHttpsProxy.", + "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + "httpMethod": "POST", + "id": "compute.regionTargetHttpsProxies.setUrlMap", + "parameterOrder": [ + "project", + "region", + "targetHttpsProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetHttpsProxy": { + "description": "Name of the TargetHttpsProxy to set a URL map for.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + "request": { + "$ref": "UrlMapReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "regionTargetTcpProxies": { + "methods": { + "delete": { + "description": "Deletes the specified TargetTcpProxy resource.", + "flatPath": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + "httpMethod": "DELETE", + "id": "compute.regionTargetTcpProxies.delete", + "parameterOrder": [ + "project", + "region", + "targetTcpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetTcpProxy": { + "description": "Name of the TargetTcpProxy resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified TargetTcpProxy resource.", + "flatPath": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + "httpMethod": "GET", + "id": "compute.regionTargetTcpProxies.get", + "parameterOrder": [ + "project", + "region", + "targetTcpProxy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "targetTcpProxy": { + "description": "Name of the TargetTcpProxy resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + "response": { + "$ref": "TargetTcpProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a TargetTcpProxy resource in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/targetTcpProxies", + "httpMethod": "POST", + "id": "compute.regionTargetTcpProxies.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetTcpProxies", + "request": { + "$ref": "TargetTcpProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of TargetTcpProxy resources available to the specified project in a given region.", + "flatPath": "projects/{project}/regions/{region}/targetTcpProxies", + "httpMethod": "GET", + "id": "compute.regionTargetTcpProxies.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/targetTcpProxies", + "response": { + "$ref": "TargetTcpProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "regionUrlMaps": { + "methods": { + "delete": { + "description": "Deletes the specified UrlMap resource.", + "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "httpMethod": "DELETE", + "id": "compute.regionUrlMaps.delete", + "parameterOrder": [ + "project", + "region", + "urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + "location": "query", + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified UrlMap resource.", + "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "httpMethod": "GET", + "id": "compute.regionUrlMaps.get", + "parameterOrder": [ + "project", + "region", + 
"urlMap" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "urlMap": { + "description": "Name of the UrlMap resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/urlMaps/{urlMap}", + "response": { + "$ref": "UrlMap" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a UrlMap resource in the specified project using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/urlMaps", + "httpMethod": "POST", + "id": "compute.regionUrlMaps.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/urlMaps", + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of UrlMap resources available to the specified project in the specified region.", + "flatPath": "projects/{project}/regions/{region}/urlMaps", + "httpMethod": "GET", + "id": "compute.regionUrlMaps.list", "parameterOrder": [ "project", "region" @@ -23086,7 +24780,7 @@ "regions": { "methods": { "get": { - "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", + "description": "Returns the specified Region resource. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. 
For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", "flatPath": "projects/{project}/regions/{region}", "httpMethod": "GET", "id": "compute.regions.get", @@ -24196,7 +25890,7 @@ ] }, "get": { - "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request.", + "description": "Returns the specified Router resource.", "flatPath": "projects/{project}/regions/{region}/routers/{router}", "httpMethod": "GET", "id": "compute.routers.get", @@ -24647,7 +26341,7 @@ ] }, "get": { - "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request.", + "description": "Returns the specified Route resource.", "flatPath": "projects/{project}/global/routes/{route}", "httpMethod": "GET", "id": "compute.routes.get", @@ -25141,7 +26835,7 @@ ] }, "patch": { - "description": "Patches the specified policy with the data included in the request. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", + "description": "Patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}", "httpMethod": "PATCH", "id": "compute.securityPolicies.patch", @@ -25269,6 +26963,43 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "setLabels": { + "description": "Sets the labels on a security policy. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/global/securityPolicies/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.securityPolicies.setLabels", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/securityPolicies/{resource}/setLabels", + "request": { + "$ref": "GlobalSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -25761,7 +27492,7 @@ ] }, "get": { - "description": "Returns the specified Snapshot resource. Gets a list of available snapshots by making a list() request.", + "description": "Returns the specified Snapshot resource.", "flatPath": "projects/{project}/global/snapshots/{snapshot}", "httpMethod": "GET", "id": "compute.snapshots.get", @@ -26141,7 +27872,7 @@ ] }, "get": { - "description": "Returns the specified SslCertificate resource. 
Gets a list of available SSL certificates by making a list() request.", + "description": "Returns the specified SslCertificate resource.", "flatPath": "projects/{project}/global/sslCertificates/{sslCertificate}", "httpMethod": "GET", "id": "compute.sslCertificates.get", @@ -26268,6 +27999,66 @@ }, "sslPolicies": { "methods": { + "aggregatedList": { + "description": "Retrieves the list of all SslPolicy resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/sslPolicies", + "httpMethod": "GET", + "id": "compute.sslPolicies.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/sslPolicies", + "response": { + "$ref": "SslPoliciesAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "delete": { "description": "Deletes the specified SSL policy. The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", "flatPath": "projects/{project}/global/sslPolicies/{sslPolicy}", @@ -26341,7 +28132,7 @@ ] }, "insert": { - "description": "Returns the specified SSL policy resource. Gets a list of available SSL policies by making a list() request.", + "description": "Returns the specified SSL policy resource.", "flatPath": "projects/{project}/global/sslPolicies", "httpMethod": "POST", "id": "compute.sslPolicies.insert", @@ -26687,7 +28478,7 @@ ] }, "get": { - "description": "Returns the specified subnetwork. Gets a list of available subnetworks list() request.", + "description": "Returns the specified subnetwork.", "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", "httpMethod": "GET", "id": "compute.subnetworks.get", @@ -27448,7 +29239,7 @@ ] }, "get": { - "description": "Returns the specified TargetHttpProxy resource. 
Gets a list of available target HTTP proxies by making a list() request.", + "description": "Returns the specified TargetHttpProxy resource.", "flatPath": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", "httpMethod": "GET", "id": "compute.targetHttpProxies.get", @@ -27759,7 +29550,7 @@ ] }, "get": { - "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request.", + "description": "Returns the specified TargetHttpsProxy resource.", "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", "httpMethod": "GET", "id": "compute.targetHttpsProxies.get", @@ -28243,7 +30034,7 @@ ] }, "get": { - "description": "Returns the specified TargetInstance resource. Gets a list of available target instances by making a list() request.", + "description": "Returns the specified TargetInstance resource.", "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}", "httpMethod": "GET", "id": "compute.targetInstances.get", @@ -28602,7 +30393,7 @@ ] }, "get": { - "description": "Returns the specified target pool. Gets a list of available target pools by making a list() request.", + "description": "Returns the specified target pool.", "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}", "httpMethod": "GET", "id": "compute.targetPools.get", @@ -28995,7 +30786,7 @@ ] }, "get": { - "description": "Returns the specified TargetSslProxy resource. Gets a list of available target SSL proxies by making a list() request.", + "description": "Returns the specified TargetSslProxy resource.", "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}", "httpMethod": "GET", "id": "compute.targetSslProxies.get", @@ -29330,6 +31121,66 @@ }, "targetTcpProxies": { "methods": { + "aggregatedList": { + "description": "Retrieves the list of all TargetTcpProxy resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/targetTcpProxies", + "httpMethod": "GET", + "id": "compute.targetTcpProxies.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/targetTcpProxies", + "response": { + "$ref": "TargetTcpProxyAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "delete": { "description": "Deletes the specified TargetTcpProxy resource.", "flatPath": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}", @@ -29370,7 +31221,7 @@ ] }, "get": { - "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request.", + "description": "Returns the specified TargetTcpProxy resource.", "flatPath": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}", "httpMethod": "GET", "id": "compute.targetTcpProxies.get", @@ -29689,7 +31540,7 @@ ] }, "get": { - "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request.", + "description": "Returns the specified target VPN gateway.", "flatPath": "projects/{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", "httpMethod": "GET", "id": "compute.targetVpnGateways.get", @@ -29835,6 +31686,56 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "setLabels": { + "description": "Sets the labels on a TargetVpnGateway. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.targetVpnGateways.setLabels", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -29940,7 +31841,7 @@ ] }, "get": { - "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", + "description": "Returns the specified UrlMap resource.", "flatPath": "projects/{project}/global/urlMaps/{urlMap}", "httpMethod": "GET", "id": "compute.urlMaps.get", @@ -30338,7 +32239,7 @@ ] }, "get": { - "description": "Returns the specified VPN gateway. Gets a list of available VPN gateways by making a list() request.", + "description": "Returns the specified VPN gateway.", "flatPath": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}", "httpMethod": "GET", "id": "compute.vpnGateways.get", @@ -30678,9 +32579,204 @@ "type": "boolean" } }, - "path": "projects/{project}/aggregated/vpnTunnels", + "path": "projects/{project}/aggregated/vpnTunnels", + "response": { + "$ref": "VpnTunnelAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "description": "Deletes the specified VpnTunnel resource.", + "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "httpMethod": "DELETE", + "id": "compute.vpnTunnels.delete", + "parameterOrder": [ + "project", + "region", + "vpnTunnel" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "vpnTunnel": { + "description": "Name of the VpnTunnel resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified VpnTunnel resource.", + "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "httpMethod": "GET", + "id": "compute.vpnTunnels.get", + "parameterOrder": [ + "project", + "region", + "vpnTunnel" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "vpnTunnel": { + "description": "Name of the VpnTunnel resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "response": { + "$ref": "VpnTunnel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", + "flatPath": "projects/{project}/regions/{region}/vpnTunnels", + "httpMethod": "POST", + "id": "compute.vpnTunnels.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/vpnTunnels", + "request": { + "$ref": "VpnTunnel" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region.", + "flatPath": "projects/{project}/regions/{region}/vpnTunnels", + "httpMethod": "GET", + "id": "compute.vpnTunnels.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. 
(Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/vpnTunnels", "response": { - "$ref": "VpnTunnelAggregatedList" + "$ref": "VpnTunnelList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -30688,15 +32784,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "description": "Deletes the specified VpnTunnel resource.", - "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", - "httpMethod": "DELETE", - "id": "compute.vpnTunnels.delete", + "setLabels": { + "description": "Sets the labels on a VpnTunnel. To learn more about labels, read the Labeling Resources documentation.", + "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + "httpMethod": "POST", + "id": "compute.vpnTunnels.setLabels", "parameterOrder": [ "project", "region", - "vpnTunnel" + "resource" ], "parameters": { "project": { @@ -30707,7 +32803,7 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "The region for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, @@ -30718,99 +32814,17 @@ "location": "query", "type": "string" }, - "vpnTunnel": { - "description": "Name of the VpnTunnel resource to delete.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - "required": true, - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "description": "Returns the specified VpnTunnel resource. 
Gets a list of available VPN tunnels by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", - "httpMethod": "GET", - "id": "compute.vpnTunnels.get", - "parameterOrder": [ - "project", - "region", - "vpnTunnel" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "vpnTunnel": { - "description": "Name of the VpnTunnel resource to return.", + "resource": { + "description": "Name or id of the resource for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", - "response": { - "$ref": "VpnTunnel" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", - "flatPath": "projects/{project}/regions/{region}/vpnTunnels", - "httpMethod": "POST", - "id": "compute.vpnTunnels.insert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/vpnTunnels", + "path": "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels", "request": { - "$ref": "VpnTunnel" + "$ref": "RegionSetLabelsRequest" }, "response": { "$ref": "Operation" @@ -30819,69 +32833,6 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "list": { - "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region.", - "flatPath": "projects/{project}/regions/{region}/vpnTunnels", - "httpMethod": "GET", - "id": "compute.vpnTunnels.list", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", - "location": "query", - "type": "string" - }, - "maxResults": { - "default": "500", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. 
Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - "format": "uint32", - "location": "query", - "minimum": "0", - "type": "integer" - }, - "orderBy": { - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - "location": "query", - "type": "string" - }, - "pageToken": { - "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - "location": "query", - "type": "string" - }, - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "Name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "returnPartialSuccess": { - "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", - "location": "query", - "type": "boolean" - } - }, - "path": "projects/{project}/regions/{region}/vpnTunnels", - "response": { - "$ref": "VpnTunnelList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] } } }, @@ -31080,7 +33031,7 @@ "zones": { "methods": { "get": { - "description": "Returns the specified Zone resource. Gets a list of available zones by making a list() request.", + "description": "Returns the specified Zone resource.", "flatPath": "projects/{project}/zones/{zone}", "httpMethod": "GET", "id": "compute.zones.get", @@ -31172,7 +33123,7 @@ } } }, - "revision": "20220720", + "revision": "20230307", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -31592,7 +33543,7 @@ "id": "AccessConfig", "properties": { "externalIpv6": { - "description": "The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically.", + "description": "The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", "type": "string" }, "externalIpv6PrefixLength": { @@ -31702,6 +33653,18 @@ ], "type": "string" }, + "ipv6EndpointType": { + "description": "The endpoint type of this address, which should be VM or NETLB. 
This is used for deciding which type of endpoint this address can be used after the external IPv6 address reservation.", + "enum": [ + "NETLB", + "VM" + ], + "enumDescriptions": [ + "Reserved IPv6 address can be used on network load balancer.", + "Reserved IPv6 address can be used on VM." + ], + "type": "string" + }, "kind": { "default": "compute#address", "description": "[Output Only] Type of the resource. Always compute#address for addresses.", @@ -31743,7 +33706,7 @@ "type": "integer" }, "purpose": { - "description": "The purpose of this resource, which can be one of the following values: - GCE_ENDPOINT for addresses that are used by VM instances, alias IP ranges, load balancers, and similar resources. - DNS_RESOLVER for a DNS resolver address in a subnetwork for a Cloud DNS inbound forwarder IP addresses (regional internal IP address in a subnet of a VPC network) - VPC_PEERING for global internal IP addresses used for private services access allocated ranges. - NAT_AUTO for the regional external IP addresses used by Cloud NAT when allocating addresses using automatic NAT IP address allocation. - IPSEC_INTERCONNECT for addresses created from a private IP range that are reserved for a VLAN attachment in an *IPsec-encrypted Cloud Interconnect* configuration. These addresses are regional resources. Not currently available publicly. - `SHARED_LOADBALANCER_VIP` for an internal IP address that is assigned to multiple internal forwarding rules. - `PRIVATE_SERVICE_CONNECT` for a private network address that is used to configure Private Service Connect. Only global internal addresses can use this purpose. ", + "description": "The purpose of this resource, which can be one of the following values: - GCE_ENDPOINT for addresses that are used by VM instances, alias IP ranges, load balancers, and similar resources. - DNS_RESOLVER for a DNS resolver address in a subnetwork for a Cloud DNS inbound forwarder IP addresses (regional internal IP address in a subnet of a VPC network) - VPC_PEERING for global internal IP addresses used for private services access allocated ranges. - NAT_AUTO for the regional external IP addresses used by Cloud NAT when allocating addresses using automatic NAT IP address allocation. - IPSEC_INTERCONNECT for addresses created from a private IP range that are reserved for a VLAN attachment in an *HA VPN over Cloud Interconnect* configuration. These addresses are regional resources. - `SHARED_LOADBALANCER_VIP` for an internal IP address that is assigned to multiple internal forwarding rules. - `PRIVATE_SERVICE_CONNECT` for a private network address that is used to configure Private Service Connect. Only global internal addresses can use this purpose. ", "enum": [ "DNS_RESOLVER", "GCE_ENDPOINT", @@ -31757,7 +33720,7 @@ "enumDescriptions": [ "DNS resolver address in the subnetwork.", "VM internal/alias IP, Internal LB service IP, etc.", - "A regional internal IP address range reserved for the VLAN attachment that is used in IPsec-encrypted Cloud Interconnect. This regional internal IP address range must not overlap with any IP address range of subnet/route in the VPC network and its peering networks. After the VLAN attachment is created with the reserved IP address range, when creating a new VPN gateway, its interface IP address is allocated from the associated VLAN attachment’s IP address range.", + "A regional internal IP address range reserved for the VLAN attachment that is used in HA VPN over Cloud Interconnect. 
This regional internal IP address range must not overlap with any IP address range of subnet/route in the VPC network and its peering networks. After the VLAN attachment is created with the reserved IP address range, when creating a new VPN gateway, its interface IP address is allocated from the associated VLAN attachment’s IP address range.", "External IP automatically reserved for Cloud NAT.", "A private network IP address that can be used to configure Private Service Connect. This purpose can be specified only for GLOBAL addresses of Type INTERNAL", "A regional internal IP address range reserved for Serverless.", @@ -32167,6 +34130,11 @@ "description": "The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.", "format": "int32", "type": "integer" + }, + "visibleCoreCount": { + "description": "The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.", + "format": "int32", + "type": "integer" } }, "type": "object" @@ -32186,6 +34154,28 @@ }, "type": "object" }, + "AllocationResourceStatus": { + "description": "[Output Only] Contains output only fields.", + "id": "AllocationResourceStatus", + "properties": { + "specificSkuAllocation": { + "$ref": "AllocationResourceStatusSpecificSKUAllocation", + "description": "Allocation Properties of this reservation." + } + }, + "type": "object" + }, + "AllocationResourceStatusSpecificSKUAllocation": { + "description": "Contains Properties set for the reservation.", + "id": "AllocationResourceStatusSpecificSKUAllocation", + "properties": { + "sourceInstanceTemplateId": { + "description": "ID of the instance template used to populate reservation properties.", + "type": "string" + } + }, + "type": "object" + }, "AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk": { "id": "AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk", "properties": { @@ -32264,6 +34254,10 @@ "instanceProperties": { "$ref": "AllocationSpecificSKUAllocationReservedInstanceProperties", "description": "The instance properties for the reservation." + }, + "sourceInstanceTemplate": { + "description": "Specifies the instance template to create the reservation. If you use this field, you must exclude the instanceProperties field. This field is optional, and it can be a full or partial URL. For example, the following are all valid URLs to an instance template: - https://www.googleapis.com/compute/v1/projects/project /global/instanceTemplates/instanceTemplate - projects/project/global/instanceTemplates/instanceTemplate - global/instanceTemplates/instanceTemplate ", + "type": "string" } }, "type": "object" @@ -32307,6 +34301,10 @@ "format": "int64", "type": "string" }, + "forceAttach": { + "description": "[Input Only] Whether to force attach the regional disk even if it's currently attached to another instance. If you try to force attach a zonal disk to an instance, you will receive an error.", + "type": "boolean" + }, "guestOsFeatures": { "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. 
Read Enabling guest operating system features to see a list of available options.", "items": { @@ -32324,7 +34322,7 @@ "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance. This property is mutually exclusive with the source property; you can only define one or the other, but not both." }, "interface": { - "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.", + "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. For most machine types, the default is SCSI. Local SSDs can use either NVME or SCSI. In certain configurations, persistent disks can use NVMe. For more information, see About persistent disks.", "enum": [ "NVME", "SCSI" @@ -32414,7 +34412,7 @@ "type": "string" }, "diskType": { - "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL.", + "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you specify this field when creating a VM, you can provide either the full or partial URL. For example, the following values are valid: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType If you specify this field when creating or updating an instance template or all-instances configuration, specify the type of the disk, not the URL. For example: pd-standard.", "type": "string" }, "labels": { @@ -32450,6 +34448,13 @@ "format": "int64", "type": "string" }, + "resourceManagerTags": { + "additionalProperties": { + "type": "string" + }, + "description": "Resource manager tags to be bound to the disk. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT \u0026 PATCH) when empty.", + "type": "object" + }, "resourcePolicies": { "description": "Resource policies applied to this disk for automatic snapshot creations. Specified using the full or partial URL. 
For instance template, specify only the resource policy name.", "items": { @@ -32463,7 +34468,7 @@ }, "sourceImageEncryptionKey": { "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. Instance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys." + "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key. InstanceTemplate and InstancePropertiesPatch do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys." }, "sourceSnapshot": { "description": "The source snapshot to create this disk. When creating a new instance, one of initializeParams.sourceSnapshot or initializeParams.sourceImage or disks.source is required except for local SSD. To create a disk with a snapshot that you created, specify the snapshot name in the following format: global/snapshots/my-backup If the source snapshot is deleted later, this field will not be set.", @@ -33263,7 +35268,7 @@ "type": "string" }, "capacityScaler": { - "description": "A multiplier applied to the backend's target capacity of its balancing mode. The default value is 1, which means the group serves up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. The valid ranges are 0.0 and [0.1,1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. You cannot configure a setting of 0 when there is only one backend attached to the backend service.", + "description": "A multiplier applied to the backend's target capacity of its balancing mode. The default value is 1, which means the group serves up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. The valid ranges are 0.0 and [0.1,1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. You cannot configure a setting of 0 when there is only one backend attached to the backend service. Not available with backends that don't support using a balancingMode. This includes backends such as global internet NEGs, regional serverless NEGs, and PSC NEGs.", "format": "float", "type": "number" }, @@ -33329,6 +35334,18 @@ "$ref": "BackendBucketCdnPolicy", "description": "Cloud CDN configuration for this BackendBucket." }, + "compressionMode": { + "description": "Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.", + "enum": [ + "AUTOMATIC", + "DISABLED" + ], + "enumDescriptions": [ + "Automatically uses the best compression based on the Accept-Encoding header sent by the client.", + "Disables compression. Existing compressed responses cached by Cloud CDN will not be served to clients." 
+ ], + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -33647,6 +35664,18 @@ "circuitBreakers": { "$ref": "CircuitBreakers" }, + "compressionMode": { + "description": "Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header.", + "enum": [ + "AUTOMATIC", + "DISABLED" + ], + "enumDescriptions": [ + "Automatically uses the best compression based on the Accept-Encoding header sent by the client.", + "Disables compression. Existing compressed responses cached by Cloud CDN will not be served to clients." + ], + "type": "string" + }, "connectionDraining": { "$ref": "ConnectionDraining" }, @@ -33739,7 +35768,7 @@ "type": "string" }, "localityLbPolicies": { - "description": "A list of locality load balancing policies to be used in order of preference. Either the policy or the customPolicy field should be set. Overrides any value set in the localityLbPolicy field. localityLbPolicies is only supported when the BackendService is referenced by a URL Map that is referenced by a target gRPC proxy that has the validateForProxyless field set to true.", + "description": "A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration.", "items": { "$ref": "BackendServiceLocalityLoadBalancingPolicyConfig" }, @@ -33754,7 +35783,8 @@ "ORIGINAL_DESTINATION", "RANDOM", "RING_HASH", - "ROUND_ROBIN" + "ROUND_ROBIN", + "WEIGHTED_MAGLEV" ], "enumDescriptions": [ "", @@ -33763,7 +35793,8 @@ "Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer.", "The load balancer selects a random healthy host.", "The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests.", - "This is a simple policy in which each healthy backend is selected in round robin order. This is the default." + "This is a simple policy in which each healthy backend is selected in round robin order. This is the default.", + "Per-instance weighted Load Balancing via health check reported weights. If set, the Backend Service must configure a non legacy HTTP-based Health Check, and health check replies are expected to contain non-standard HTTP response header field X-Load-Balancing-Endpoint-Weight to specify the per-instance weights. If set, Load Balancing is weighted based on the per-instance weights reported in the last processed health check replies, as long as every instance either reported a valid weight or had UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. This option is only supported in Network Load Balancing." 
], "type": "string" }, @@ -33786,7 +35817,7 @@ }, "outlierDetection": { "$ref": "OutlierDetection", - "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true." + "description": "Settings controlling the eviction of unhealthy hosts from the load balancing pool for the backend service. If not set, this feature is considered disabled. This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, HTTP2, or GRPC, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. " }, "port": { "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port.", @@ -34370,7 +36401,7 @@ "type": "string" }, "name": { - "description": "Identifies the custom policy. The value should match the type the custom implementation is registered with on the gRPC clients. It should follow protocol buffer message naming conventions and include the full path (e.g. myorg.CustomLbPolicy). The maximum length is 256 characters. Note that specifying the same custom policy more than once for a backend is not a valid configuration and will be rejected.", + "description": "Identifies the custom policy. The value should match the name of a custom implementation registered on the gRPC clients. It should follow protocol buffer message naming conventions and include the full path (for example, myorg.CustomLbPolicy). The maximum length is 256 characters. Do not specify the same custom policy more than once for a backend. If you do, the configuration is rejected. For an example of how to use this field, see Use a custom policy.", "type": "string" } }, @@ -34381,7 +36412,7 @@ "id": "BackendServiceLocalityLoadBalancingPolicyConfigPolicy", "properties": { "name": { - "description": "The name of a locality load balancer policy to be used. The value should be one of the predefined ones as supported by localityLbPolicy, although at the moment only ROUND_ROBIN is supported. This field should only be populated when the customPolicy field is not used. Note that specifying the same policy more than once for a backend is not a valid configuration and will be rejected.", + "description": "The name of a locality load-balancing policy. Valid values include ROUND_ROBIN and, for Java clients, LEAST_REQUEST. For information about these values, see the description of localityLbPolicy. Do not specify the same policy more than once for a backend. 
If you do, the configuration is rejected.", "enum": [ "INVALID_LB_POLICY", "LEAST_REQUEST", @@ -34389,7 +36420,8 @@ "ORIGINAL_DESTINATION", "RANDOM", "RING_HASH", - "ROUND_ROBIN" + "ROUND_ROBIN", + "WEIGHTED_MAGLEV" ], "enumDescriptions": [ "", @@ -34398,7 +36430,8 @@ "Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer.", "The load balancer selects a random healthy host.", "The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests.", - "This is a simple policy in which each healthy backend is selected in round robin order. This is the default." + "This is a simple policy in which each healthy backend is selected in round robin order. This is the default.", + "Per-instance weighted Load Balancing via health check reported weights. If set, the Backend Service must configure a non legacy HTTP-based Health Check, and health check replies are expected to contain non-standard HTTP response header field X-Load-Balancing-Endpoint-Weight to specify the per-instance weights. If set, Load Balancing is weighted based on the per-instance weights reported in the last processed health check replies, as long as every instance either reported a valid weight or had UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. This option is only supported in Network Load Balancing." ], "type": "string" } @@ -34410,11 +36443,32 @@ "id": "BackendServiceLogConfig", "properties": { "enable": { - "description": "This field denotes whether to enable logging for the load balancer traffic served by this backend service.", + "description": "Denotes whether to enable logging for the load balancer traffic served by this backend service. The default value is false.", "type": "boolean" }, + "optionalFields": { + "description": "This field can only be specified if logging is enabled for this backend service and \"logConfig.optionalMode\" was set to CUSTOM. Contains a list of optional fields you want to include in the logs. For example: serverInstance, serverGkeDetails.cluster, serverGkeDetails.pod.podNamespace", + "items": { + "type": "string" + }, + "type": "array" + }, + "optionalMode": { + "description": "This field can only be specified if logging is enabled for this backend service. Configures whether all, none or a subset of optional fields should be added to the reported logs. One of [INCLUDE_ALL_OPTIONAL, EXCLUDE_ALL_OPTIONAL, CUSTOM]. Default is EXCLUDE_ALL_OPTIONAL.", + "enum": [ + "CUSTOM", + "EXCLUDE_ALL_OPTIONAL", + "INCLUDE_ALL_OPTIONAL" + ], + "enumDescriptions": [ + "A subset of optional fields.", + "None optional fields.", + "All optional fields." + ], + "type": "string" + }, "sampleRate": { - "description": "This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 0.0.", + "description": "This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. 
This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 1.0.", "format": "float", "type": "number" } @@ -34788,7 +36842,7 @@ "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", + "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. 
* `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding.", "items": { "type": "string" }, @@ -34992,6 +37046,13 @@ "$ref": "LicenseResourceCommitment", "description": "The license specification required as part of a license commitment." }, + "mergeSourceCommitments": { + "description": "List of source commitments to be merged into a new commitment.", + "items": { + "type": "string" + }, + "type": "array" + }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -35033,6 +37094,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "splitSourceCommitment": { + "description": "Source commitment to be splitted into a new commitment.", + "type": "string" + }, "startTimestamp": { "description": "[Output Only] Commitment start time in RFC3339 text format.", "type": "string" @@ -35048,7 +37113,7 @@ ], "enumDescriptions": [ "", - "", + "Deprecate CANCELED status. Will use separate status to differentiate cancel by mergeCud or manual cancellation.", "", "", "" @@ -35065,6 +37130,7 @@ "ACCELERATOR_OPTIMIZED", "COMPUTE_OPTIMIZED", "COMPUTE_OPTIMIZED_C2D", + "COMPUTE_OPTIMIZED_C3", "GENERAL_PURPOSE", "GENERAL_PURPOSE_E2", "GENERAL_PURPOSE_N2", @@ -35085,6 +37151,7 @@ "", "", "", + "", "" ], "type": "string" @@ -35606,7 +37673,7 @@ "type": "array" }, "allowOriginRegexes": { - "description": "Specifies a regular expression that matches allowed origins. For more information about the regular expression syntax, see Syntax. 
An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes.", + "description": "Specifies a regular expression that matches allowed origins. For more information about the regular expression syntax, see Syntax. An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. Regular expressions can only be used when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED.", "items": { "type": "string" }, @@ -35817,6 +37884,10 @@ "description": "Internal use only.", "type": "string" }, + "params": { + "$ref": "DiskParams", + "description": "Input only. [Input Only] Additional params passed with the request, but not persisted as part of resource payload." + }, "physicalBlockSizeBytes": { "description": "Physical block size of the persistent disk, in bytes. If not present in a request, a default value is used. The currently supported size is 4096, other sizes may be added in the future. If an unsupported value is requested, the error message will list the supported values for the caller's project.", "format": "int64", @@ -36232,6 +38303,20 @@ }, "type": "object" }, + "DiskParams": { + "description": "Additional disk params.", + "id": "DiskParams", + "properties": { + "resourceManagerTags": { + "additionalProperties": { + "type": "string" + }, + "description": "Resource manager tags to be bound to the disk. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values are in the format `tagValues/456`. The field is ignored (both PUT \u0026 PATCH) when empty.", + "type": "object" + } + }, + "type": "object" + }, "DiskType": { "description": "Represents a Disk Type resource. Google Compute Engine has two Disk Type resources: * [Regional](/compute/docs/reference/rest/v1/regionDiskTypes) * [Zonal](/compute/docs/reference/rest/v1/diskTypes) You can choose from a variety of disk types based on your needs. For more information, read Storage options. The diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks. The regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks.", "id": "DiskType", @@ -36861,7 +38946,7 @@ "type": "object" }, "reason": { - "description": "The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match /[A-Z0-9_]+/.", + "description": "The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE.", "type": "string" } }, @@ -37342,7 +39427,7 @@ "type": "array" }, "direction": { - "description": "Direction of traffic to which this firewall applies, either `INGRESS` or `EGRESS`. The default is `INGRESS`. For `INGRESS` traffic, you cannot specify the destinationRanges field, and for `EGRESS` traffic, you cannot specify the sourceRanges or sourceTags fields.", + "description": "Direction of traffic to which this firewall applies, either `INGRESS` or `EGRESS`. The default is `INGRESS`. 
For `EGRESS` traffic, you cannot specify the sourceTags fields.", "enum": [ "EGRESS", "INGRESS" @@ -37614,7 +39699,7 @@ "type": "string" }, "displayName": { - "description": "Deprecated, please use short name instead. User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. This name must be set on creation and cannot be changed. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Deprecated, please use short name instead. User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. This field is not applicable to network firewall policies. This name must be set on creation and cannot be changed. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, @@ -37634,11 +39719,11 @@ "type": "string" }, "name": { - "description": "[Output Only] Name of the resource. It is a numeric ID allocated by GCP which uniquely identifies the Firewall Policy.", + "description": "Name of the resource. For Organization Firewall Policies it's a [Output Only] numeric ID allocated by Google Cloud which uniquely identifies the Organization Firewall Policy.", "type": "string" }, "parent": { - "description": "[Output Only] The parent of the firewall policy.", + "description": "[Output Only] The parent of the firewall policy. This field is not applicable to network firewall policies.", "type": "string" }, "region": { @@ -37666,7 +39751,7 @@ "type": "string" }, "shortName": { - "description": "User-provided name of the Organization firewall plicy. The name should be unique in the organization in which the firewall policy is created. This name must be set on creation and cannot be changed. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. This field is not applicable to network firewall policies. This name must be set on creation and cannot be changed. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" } @@ -37819,7 +39904,7 @@ "id": "FirewallPolicyRule", "properties": { "action": { - "description": "The Action to perform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", + "description": "The Action to perform when the client connection triggers the rule. Valid actions are \"allow\", \"deny\" and \"goto_next\".", "type": "string" }, "description": { @@ -37995,7 +40080,7 @@ "id": "ForwardingRule", "properties": { "IPAddress": { - "description": "IP address for which this forwarding rule accepts traffic. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the referenced target or backendService. While creating a forwarding rule, specifying an IPAddress is required under the following circumstances: - When the target is set to targetGrpcProxy and validateForProxyless is set to true, the IPAddress should be set to 0.0.0.0. - When the target is a Private Service Connect Google APIs bundle, you must specify an IPAddress. Otherwise, you can optionally specify an IP address that references an existing static (reserved) IP address resource. When omitted, Google Cloud assigns an ephemeral IP address. Use one of the following formats to specify an IP address while creating a forwarding rule: * IP address number, as in `100.1.2.3` * Full resource URL, as in https://www.googleapis.com/compute/v1/projects/project_id/regions/region /addresses/address-name * Partial URL or by name, as in: - projects/project_id/regions/region/addresses/address-name - regions/region/addresses/address-name - global/addresses/address-name - address-name The forwarding rule's target or backendService, and in most cases, also the loadBalancingScheme, determine the type of IP address that you can use. For detailed information, see [IP address specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). When reading an IPAddress, the API always returns the IP address number.", + "description": "IP address for which this forwarding rule accepts traffic. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the referenced target or backendService. While creating a forwarding rule, specifying an IPAddress is required under the following circumstances: - When the target is set to targetGrpcProxy and validateForProxyless is set to true, the IPAddress should be set to 0.0.0.0. - When the target is a Private Service Connect Google APIs bundle, you must specify an IPAddress. Otherwise, you can optionally specify an IP address that references an existing static (reserved) IP address resource. When omitted, Google Cloud assigns an ephemeral IP address. 
Use one of the following formats to specify an IP address while creating a forwarding rule: * IP address number, as in `100.1.2.3` * IPv6 address range, as in `2600:1234::/96` * Full resource URL, as in https://www.googleapis.com/compute/v1/projects/ project_id/regions/region/addresses/address-name * Partial URL or by name, as in: - projects/project_id/regions/region/addresses/address-name - regions/region/addresses/address-name - global/addresses/address-name - address-name The forwarding rule's target or backendService, and in most cases, also the loadBalancingScheme, determine the type of IP address that you can use. For detailed information, see [IP address specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). When reading an IPAddress, the API always returns the IP address number.", "type": "string" }, "IPProtocol": { @@ -38021,7 +40106,7 @@ "type": "string" }, "allPorts": { - "description": "This field is used along with the backend_service field for Internal TCP/UDP Load Balancing or Network Load Balancing, or with the target field for internal and external TargetInstance. You can only use one of ports and port_range, or allPorts. The three are mutually exclusive. For TCP, UDP and SCTP traffic, packets addressed to any ports will be forwarded to the target or backendService.", + "description": "This field can only be used: - If IPProtocol is one of TCP, UDP, or SCTP. - By internal TCP/UDP load balancers, backend service-based network load balancers, and internal and external protocol forwarding. Set this field to true to allow packets addressed to any port or packets lacking destination port information (for example, UDP fragments after the first fragment) to be forwarded to the backends configured with this forwarding rule. The ports, port_range, and allPorts fields are mutually exclusive.", "type": "boolean" }, "allowGlobalAccess": { @@ -38032,6 +40117,10 @@ "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for Internal TCP/UDP Load Balancing and Network Load Balancing; must be omitted for all other load balancer types.", "type": "string" }, + "baseForwardingRule": { + "description": "[Output Only] The URL for the corresponding base Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the same IP address, protocol, and port settings with the current Forwarding Rule, but without sourceIPRanges specified. Always empty if the current Forwarding Rule does not have sourceIPRanges specified.", + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -38142,11 +40231,11 @@ "type": "boolean" }, "portRange": { - "description": "This field can be used only if: - Load balancing scheme is one of EXTERNAL, INTERNAL_SELF_MANAGED or INTERNAL_MANAGED - IPProtocol is one of TCP, UDP, or SCTP. Packets addressed to ports in the specified range will be forwarded to target or backend_service. You can only use one of ports, port_range, or allPorts. The three are mutually exclusive. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint ports. Some types of forwarding target have constraints on the acceptable ports. For more information, see [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications). 
@pattern: \\\\d+(?:-\\\\d+)?", + "description": "This field can only be used: - If IPProtocol is one of TCP, UDP, or SCTP. - By backend service-based network load balancers, target pool-based network load balancers, internal proxy load balancers, external proxy load balancers, Traffic Director, external protocol forwarding, and Classic VPN. Some products have restrictions on what ports can be used. See port specifications for details. Only packets addressed to ports in the specified range will be forwarded to the backends configured with this forwarding rule. The ports, port_range, and allPorts fields are mutually exclusive. For external forwarding rules, two or more forwarding rules cannot use the same [IPAddress, IPProtocol] pair, and cannot have overlapping portRanges. For internal forwarding rules within the same VPC network, two or more forwarding rules cannot use the same [IPAddress, IPProtocol] pair, and cannot have overlapping portRanges. @pattern: \\\\d+(?:-\\\\d+)?", "type": "string" }, "ports": { - "description": "The ports field is only supported when the forwarding rule references a backend_service directly. Only packets addressed to the [specified list of ports]((https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications)) are forwarded to backends. You can only use one of ports and port_range, or allPorts. The three are mutually exclusive. You can specify a list of up to five ports, which can be non-contiguous. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint ports. @pattern: \\\\d+(?:-\\\\d+)?", + "description": "This field can only be used: - If IPProtocol is one of TCP, UDP, or SCTP. - By internal TCP/UDP load balancers, backend service-based network load balancers, and internal protocol forwarding. You can specify a list of up to five ports by number, separated by commas. The ports can be contiguous or discontiguous. Only packets addressed to these ports will be forwarded to the backends configured with this forwarding rule. For external forwarding rules, two or more forwarding rules cannot use the same [IPAddress, IPProtocol] pair, and cannot share any values defined in ports. For internal forwarding rules within the same VPC network, two or more forwarding rules cannot use the same [IPAddress, IPProtocol] pair, and cannot share any values defined in ports. The ports, port_range, and allPorts fields are mutually exclusive. @pattern: \\\\d+(?:-\\\\d+)?", "items": { "type": "string" }, @@ -38200,11 +40289,19 @@ "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal load balancing.", "type": "string" }, + "sourceIpRanges": { + "description": "If not empty, this Forwarding Rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a Forwarding Rule can only have up to 64 source IP ranges, and this field can only be used with a regional Forwarding Rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", + "items": { + "type": "string" + }, + "type": "array" + }, "subnetwork": { "description": "This field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule, used in internal load balancing and network load balancing with IPv6. If the network specified is in auto subnet mode, this field is optional. 
However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", "type": "string" }, "target": { + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. - For load balancers, see the \"Target\" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: - vpc-sc - APIs that support VPC Service Controls. - all-apis - All supported Google APIs. - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. ", "type": "string" } }, @@ -38595,25 +40692,25 @@ "type": "string" }, "port": { - "description": "The port number for the health check request. Must be specified if port_name and port_specification are not set or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535.", + "description": "The TCP port number to which the health check prober sends packets. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence. The port_name should conform to RFC1035.", + "description": "Not supported.", "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, gRPC health check follows behavior specified in port and portName fields.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. 
For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." ], "type": "string" } @@ -38777,13 +40874,14 @@ "id": "GuestOsFeature", "properties": { "type": { - "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features.", + "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling guest operating system features.", "enum": [ "FEATURE_TYPE_UNSPECIFIED", "GVNIC", "MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", + "SEV_SNP_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS" @@ -38796,6 +40894,7 @@ "", "", "", + "", "" ], "type": "string" @@ -38807,29 +40906,29 @@ "id": "HTTP2HealthCheck", "properties": { "host": { - "description": "The value of the host header in the HTTP/2 health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", + "description": "The value of the host header in the HTTP/2 health check request. If left empty (default value), the host header is set to the destination IP address to which health check packets are sent. The destination IP address depends on the type of load balancer. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest", "type": "string" }, "port": { - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", + "description": "The TCP port number to which the health check prober sends packets. The default value is 443. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "description": "Not supported.", "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. 
USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTP2 health check follows behavior specified in port and portName fields.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." ], "type": "string" }, @@ -38850,7 +40949,7 @@ "type": "string" }, "response": { - "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", + "description": "Creates a content-based HTTP/2 health check. In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http", "type": "string" } }, @@ -38860,29 +40959,29 @@ "id": "HTTPHealthCheck", "properties": { "host": { - "description": "The value of the host header in the HTTP health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", + "description": "The value of the host header in the HTTP health check request. 
If left empty (default value), the host header is set to the destination IP address to which health check packets are sent. The destination IP address depends on the type of load balancer. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest", "type": "string" }, "port": { - "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.", + "description": "The TCP port number to which the health check prober sends packets. The default value is 80. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "description": "Not supported.", "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTP health check follows behavior specified in port and portName fields.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. 
For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." ], "type": "string" }, @@ -38903,7 +41002,7 @@ "type": "string" }, "response": { - "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", + "description": "Creates a content-based HTTP health check. In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http", "type": "string" } }, @@ -38913,29 +41012,29 @@ "id": "HTTPSHealthCheck", "properties": { "host": { - "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the IP on behalf of which this health check is performed will be used.", + "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the host header is set to the destination IP address to which health check packets are sent. The destination IP address depends on the type of load balancer. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest", "type": "string" }, "port": { - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", + "description": "The TCP port number to which the health check prober sends packets. The default value is 443. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "description": "Not supported.", "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, HTTPS health check follows behavior specified in port and portName fields.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. 
Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." ], "type": "string" }, @@ -38956,7 +41055,7 @@ "type": "string" }, "response": { - "description": "The string to match anywhere in the first 1024 bytes of the response body. If left empty (the default value), the status code determines health. The response data can only be ASCII.", + "description": "Creates a content-based HTTPS health check. In addition to the required HTTP 200 (OK) status code, you can configure the health check to pass only when the backend sends this specific ASCII response string within the first 1024 bytes of the HTTP response body. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http", "type": "string" } }, @@ -39223,14 +41322,14 @@ "type": "string" }, "healthChecks": { - "description": "A list of URLs to the HealthCheck resources. Must have at least one HealthCheck, and not more than 10. HealthCheck resources must have portSpecification=USE_SERVING_PORT or portSpecification=USE_FIXED_PORT. For regional HealthCheckService, the HealthCheck must be regional and in the same region. For global HealthCheckService, HealthCheck must be global. Mix of regional and global HealthChecks is not supported. Multiple regional HealthChecks must belong to the same region. Regional HealthChecks must belong to the same region as zones of NEGs.", + "description": "A list of URLs to the HealthCheck resources. Must have at least one HealthCheck, and not more than 10 for regional HealthCheckService, and not more than 1 for global HealthCheckService. HealthCheck resources must have portSpecification=USE_SERVING_PORT or portSpecification=USE_FIXED_PORT. For regional HealthCheckService, the HealthCheck must be regional and in the same region. For global HealthCheckService, HealthCheck must be global. Mix of regional and global HealthChecks is not supported. Multiple regional HealthChecks must belong to the same region. Regional HealthChecks must belong to the same region as zones of NetworkEndpointGroups. 
For global HealthCheckService using global INTERNET_IP_PORT NetworkEndpointGroups, the global HealthChecks must specify sourceRegions, and HealthChecks that specify sourceRegions can only be used with global INTERNET_IP_PORT NetworkEndpointGroups.", "items": { "type": "string" }, "type": "array" }, "healthStatusAggregationPolicy": { - "description": "Optional. Policy for how the results from multiple health checks for the same endpoint are aggregated. Defaults to NO_AGGREGATION if unspecified. - NO_AGGREGATION. An EndpointHealth message is returned for each pair in the health check service. - AND. If any health check of an endpoint reports UNHEALTHY, then UNHEALTHY is the HealthState of the endpoint. If all health checks report HEALTHY, the HealthState of the endpoint is HEALTHY. .", + "description": "Optional. Policy for how the results from multiple health checks for the same endpoint are aggregated. Defaults to NO_AGGREGATION if unspecified. - NO_AGGREGATION. An EndpointHealth message is returned for each pair in the health check service. - AND. If any health check of an endpoint reports UNHEALTHY, then UNHEALTHY is the HealthState of the endpoint. If all health checks report HEALTHY, the HealthState of the endpoint is HEALTHY. . This is only allowed with regional HealthCheckService.", "enum": [ "AND", "NO_AGGREGATION" @@ -39257,7 +41356,7 @@ "type": "string" }, "networkEndpointGroups": { - "description": "A list of URLs to the NetworkEndpointGroup resources. Must not have more than 100. For regional HealthCheckService, NEGs must be in zones in the region of the HealthCheckService.", + "description": "A list of URLs to the NetworkEndpointGroup resources. Must not have more than 100. For regional HealthCheckService, NEGs must be in zones in the region of the HealthCheckService. For global HealthCheckServices, the NetworkEndpointGroups must be global INTERNET_IP_PORT.", "items": { "type": "string" }, @@ -39904,7 +42003,7 @@ "description": "The header value must be an integer and its value must be in the range specified in rangeMatch. If the header does not contain an integer, number or is empty, the match fails. For example for a range [-5, 0] - -3 will match. - 0 will not match. - 0.25 will not match. - -3someString will not match. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. rangeMatch is not supported for load balancers that have loadBalancingScheme set to EXTERNAL." }, "regexMatch": { - "description": "The value of the header must match the regular expression specified in regexMatch. For more information about regular expression syntax, see Syntax. For matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "The value of the header must match the regular expression specified in regexMatch. For more information about regular expression syntax, see Syntax. For matching against a port specified in the HTTP request, use a headerMatch with headerName set to PORT and a regular expression that satisfies the RFC2616 Host header's port specifier. Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. 
Regular expressions can only be used when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED.", "type": "string" }, "suffixMatch": { @@ -39980,7 +42079,7 @@ "type": "integer" }, "requestPath": { - "description": "The request path of the HTTP health check request. The default value is /. This field does not support query parameters.", + "description": "The request path of the HTTP health check request. The default value is /. This field does not support query parameters. Must comply with RFC3986.", "type": "string" }, "selfLink": { @@ -40137,7 +42236,7 @@ "type": "boolean" }, "regexMatch": { - "description": "The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For more information about regular expression syntax, see Syntax. Only one of presentMatch, exactMatch, or regexMatch must be set. regexMatch only applies when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED. ", + "description": "The queryParameterMatch matches if the value of the parameter matches the regular expression specified by regexMatch. For more information about regular expression syntax, see Syntax. Only one of presentMatch, exactMatch, or regexMatch must be set. Regular expressions can only be used when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED. ", "type": "string" } }, @@ -40278,7 +42377,7 @@ }, "routeAction": { "$ref": "HttpRouteAction", - "description": "In response to a matching matchRule, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a route rule's routeAction." + "description": "In response to a matching matchRule, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within a route rule's routeAction." }, "service": { "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set.", @@ -40329,7 +42428,7 @@ "type": "array" }, "regexMatch": { - "description": "For satisfying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For more information about regular expression syntax, see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. 
regexMatch only applies to load balancers that have loadBalancingScheme set to INTERNAL_SELF_MANAGED.", + "description": "For satisfying the matchRule condition, the path of the request must satisfy the regular expression specified in regexMatch after removing any query parameters and anchor supplied with the original URL. For more information about regular expression syntax, see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must be specified. Regular expressions can only be used when the loadBalancingScheme is set to INTERNAL_SELF_MANAGED.", "type": "string" } }, @@ -40382,7 +42481,7 @@ "type": "integer" }, "requestPath": { - "description": "The request path of the HTTPS health check request. The default value is \"/\".", + "description": "The request path of the HTTPS health check request. The default value is \"/\". Must comply with RFC3986.", "type": "string" }, "selfLink": { @@ -40563,7 +42662,7 @@ "type": "string" }, "family": { - "description": "The name of the image family to which this image belongs. You can create disks by specifying an image family instead of a specific image name. The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035.", + "description": "The name of the image family to which this image belongs. The image family name can be from a publicly managed image family provided by Compute Engine, or from a custom image family you create. For example, centos-stream-9 is a publicly available image family. For more information, see Image family best practices. When creating disks, you can specify an image family instead of a specific image name. The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035.", "type": "string" }, "guestOsFeatures": { @@ -41003,11 +43102,6 @@ "type": "string" }, "machineType": { - "annotations": { - "required": [ - "compute.instances.insert" - ] - }, "description": "Full or partial URL of the machine type resource to use for this instance, in the format: zones/zone/machineTypes/machine-type. This is provided by the client when the instance is created. For example, the following is a valid partial url to a predefined machine type: zones/us-central1-f/machineTypes/n1-standard-1 To create a custom machine type, provide a URL to a machine type in the following format, where CPUS is 1 or an even number up to 32 (2, 4, 6, ... 24, etc), and MEMORY is the total memory for this instance. Memory must be a multiple of 256 MB and must be supplied in MB (e.g. 5 GB of memory is 5120 MB): zones/zone/machineTypes/custom-CPUS-MEMORY For example: zones/us-central1-f/machineTypes/custom-4-5120 For a full list of restrictions, read the Specifications for custom machine types.", "type": "string" }, @@ -41068,6 +43162,10 @@ }, "type": "array" }, + "resourceStatus": { + "$ref": "ResourceStatus", + "description": "[Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field." + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -41275,6 +43373,46 @@ }, "type": "object" }, + "InstanceConsumptionData": { + "id": "InstanceConsumptionData", + "properties": { + "consumptionInfo": { + "$ref": "InstanceConsumptionInfo", + "description": "Resources consumed by the instance." 
+ }, + "instance": { + "description": "Server-defined URL for the instance.", + "type": "string" + } + }, + "type": "object" + }, + "InstanceConsumptionInfo": { + "id": "InstanceConsumptionInfo", + "properties": { + "guestCpus": { + "description": "The number of virtual CPUs that are available to the instance.", + "format": "int32", + "type": "integer" + }, + "localSsdGb": { + "description": "The amount of local SSD storage available to the instance, defined in GiB.", + "format": "int32", + "type": "integer" + }, + "memoryMb": { + "description": "The amount of physical memory available to the instance, defined in MiB.", + "format": "int32", + "type": "integer" + }, + "minNodeCpus": { + "description": "The minimal guaranteed number of virtual CPUs that are reserved.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "InstanceGroup": { "description": "Represents an Instance Group resource. Instance Groups can be used to configure a target for load balancing. Instance groups can either be managed or unmanaged. To create managed instance groups, use the instanceGroupManager or regionInstanceGroupManager resource instead. Use zonal unmanaged instance groups if you need to apply load balancing to groups of heterogeneous instances or if you need to manage the instances yourself. You cannot create regional unmanaged instance groups. For more information, read Instance groups.", "id": "InstanceGroup", @@ -41654,6 +43792,18 @@ "description": "[Output Only] The resource type, which is always compute#instanceGroupManager for managed instance groups.", "type": "string" }, + "listManagedInstancesResults": { + "description": "Pagination behavior of the listManagedInstances API method for this managed instance group.", + "enum": [ + "PAGELESS", + "PAGINATED" + ], + "enumDescriptions": [ + "(Default) Pagination is disabled for the group's listManagedInstances API method. maxResults and pageToken query parameters are ignored and all instances are returned in a single response.", + "Pagination is enabled for the group's listManagedInstances API method. maxResults and pageToken query parameters are respected." + ], + "type": "string" + }, "name": { "annotations": { "required": [ @@ -43445,6 +45595,10 @@ "$ref": "InstanceProperties", "description": "The instance properties for this instance template." }, + "region": { + "description": "[Output Only] URL of the region where the instance template resides. Only applicable for regional resources.", + "type": "string" + }, "selfLink": { "description": "[Output Only] The URL for this instance template. The server defines this URL.", "type": "string" @@ -43460,6 +45614,127 @@ }, "type": "object" }, + "InstanceTemplateAggregatedList": { + "description": "Contains a list of InstanceTemplatesScopedList.", + "id": "InstanceTemplateAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "InstanceTemplatesScopedList", + "description": "The name of the scope that contains this set of instance templates." + }, + "description": "A list of InstanceTemplatesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#instanceTemplateAggregatedList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. 
If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. 
We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "InstanceTemplateList": { "description": "A list of instance templates.", "id": "InstanceTemplateList", @@ -43580,6 +45855,108 @@ }, "type": "object" }, + "InstanceTemplatesScopedList": { + "id": "InstanceTemplatesScopedList", + "properties": { + "instanceTemplates": { + "description": "[Output Only] A list of instance templates that are contained within the specified project and zone.", + "items": { + "$ref": "InstanceTemplate" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] An informational warning that replaces the list of instance templates when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "InstanceWithNamedPorts": { "id": "InstanceWithNamedPorts", "properties": { @@ -43864,6 +46241,20 @@ }, "type": "object" }, + "InstancesSetNameRequest": { + "id": "InstancesSetNameRequest", + "properties": { + "currentName": { + "description": "The current name of this resource, used to prevent conflicts. Provide the latest name when making a request to change name.", + "type": "string" + }, + "name": { + "description": "The name to be applied to the instance. Needs to be RFC 1035 compliant.", + "type": "string" + } + }, + "type": "object" + }, "InstancesSetServiceAccountRequest": { "id": "InstancesSetServiceAccountRequest", "properties": { @@ -44011,7 +46402,7 @@ "type": "string" }, "nocContactEmail": { - "description": "Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Stackdriver logs alerting and Cloud Notifications.", + "description": "Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Cloud Monitoring logs alerting and Cloud Notifications. This field is required for users who sign up for Cloud Interconnect using workforce identity federation.", "type": "string" }, "operationalStatus": { @@ -44041,7 +46432,7 @@ "type": "integer" }, "satisfiesPzs": { - "description": "[Output Only] Set to true if the resource satisfies the zone separation organization policy constraints and false otherwise. Defaults to false if the field is not present.", + "description": "[Output Only] Reserved for future use.", "type": "boolean" }, "selfLink": { @@ -44169,13 +46560,13 @@ "type": "string" }, "encryption": { - "description": "Indicates the user-supplied encryption option of this VLAN attachment (interconnectAttachment). Can only be specified at attachment creation for PARTNER or DEDICATED attachments. Possible values are: - NONE - This is the default value, which means that the VLAN attachment carries unencrypted traffic. VMs are able to send traffic to, or receive traffic from, such a VLAN attachment. - IPSEC - The VLAN attachment carries only encrypted traffic that is encrypted by an IPsec device, such as an HA VPN gateway or third-party IPsec VPN. VMs cannot directly send traffic to, or receive traffic from, such a VLAN attachment. To use *IPsec-encrypted Cloud Interconnect*, the VLAN attachment must be created with this option. Not currently available publicly. ", + "description": "Indicates the user-supplied encryption option of this VLAN attachment (interconnectAttachment). 
Can only be specified at attachment creation for PARTNER or DEDICATED attachments. Possible values are: - NONE - This is the default value, which means that the VLAN attachment carries unencrypted traffic. VMs are able to send traffic to, or receive traffic from, such a VLAN attachment. - IPSEC - The VLAN attachment carries only encrypted traffic that is encrypted by an IPsec device, such as an HA VPN gateway or third-party IPsec VPN. VMs cannot directly send traffic to, or receive traffic from, such a VLAN attachment. To use *HA VPN over Cloud Interconnect*, the VLAN attachment must be created with this option. ", "enum": [ "IPSEC", "NONE" ], "enumDescriptions": [ - "The interconnect attachment will carry only encrypted traffic that is encrypted by an IPsec device such as HA VPN gateway; VMs cannot directly send traffic to or receive traffic from such an interconnect attachment. To use IPsec-encrypted Cloud Interconnect, the interconnect attachment must be created with this option.", + "The interconnect attachment will carry only encrypted traffic that is encrypted by an IPsec device such as HA VPN gateway; VMs cannot directly send traffic to or receive traffic from such an interconnect attachment. To use HA VPN over Cloud Interconnect, the interconnect attachment must be created with this option.", "This is the default value, which means the Interconnect Attachment will carry unencrypted traffic. VMs will be able to send traffic to or receive traffic from such interconnect attachment." ], "type": "string" @@ -44253,7 +46644,7 @@ "type": "string" }, "satisfiesPzs": { - "description": "[Output Only] Set to true if the resource satisfies the zone separation organization policy constraints and false otherwise. Defaults to false if the field is not present.", + "description": "[Output Only] Reserved for future use.", "type": "boolean" }, "selfLink": { @@ -44309,7 +46700,7 @@ "type": "string" }, "vlanTag8021q": { - "description": "The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. Only specified at creation time.", + "description": "The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4093. Only specified at creation time.", "format": "int32", "type": "integer" } @@ -44726,6 +47117,30 @@ }, "type": "array" }, + "bundleAggregationType": { + "description": "The aggregation type of the bundle interface.", + "enum": [ + "BUNDLE_AGGREGATION_TYPE_LACP", + "BUNDLE_AGGREGATION_TYPE_STATIC" + ], + "enumDescriptions": [ + "LACP is enabled.", + "LACP is disabled." + ], + "type": "string" + }, + "bundleOperationalStatus": { + "description": "The operational status of the bundle interface.", + "enum": [ + "BUNDLE_OPERATIONAL_STATUS_DOWN", + "BUNDLE_OPERATIONAL_STATUS_UP" + ], + "enumDescriptions": [ + "If bundleAggregationType is LACP: LACP is not established and/or all links in the bundle have DOWN operational status. If bundleAggregationType is STATIC: one or more links in the bundle has DOWN operational status.", + "If bundleAggregationType is LACP: LACP is established and at least one link in the bundle has UP operational status. If bundleAggregationType is STATIC: all links in the bundle (typically just one) have UP operational status." 
+ ], + "type": "string" + }, "links": { "description": "A list of InterconnectDiagnostics.LinkStatus objects, describing the status for each link on the Interconnect.", "items": { @@ -44831,6 +47246,18 @@ "lacpStatus": { "$ref": "InterconnectDiagnosticsLinkLACPStatus" }, + "operationalStatus": { + "description": "The operational status of the link.", + "enum": [ + "LINK_OPERATIONAL_STATUS_DOWN", + "LINK_OPERATIONAL_STATUS_UP" + ], + "enumDescriptions": [ + "The interface is unable to communicate with the remote end.", + "The interface has low level communication with the remote end." + ], + "type": "string" + }, "receivingOpticalPower": { "$ref": "InterconnectDiagnosticsLinkOpticalPower", "description": "An InterconnectDiagnostics.LinkOpticalPower object, describing the current value and status of the received light level." @@ -45064,7 +47491,7 @@ "type": "string" }, "supportsPzs": { - "description": "[Output Only] Set to true for locations that support physical zone separation. Defaults to false if the field is not present.", + "description": "[Output Only] Reserved for future use.", "type": "boolean" } }, @@ -45622,7 +48049,7 @@ "id": "LocalizedMessage", "properties": { "locale": { - "description": "The locale used following the specification defined at http://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: \"en-US\", \"fr-CH\", \"es-MX\"", + "description": "The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: \"en-US\", \"fr-CH\", \"es-MX\"", "type": "string" }, "message": { @@ -46630,6 +49057,9 @@ }, "localizedMessage": { "$ref": "LocalizedMessage" + }, + "quotaInfo": { + "$ref": "QuotaExceededInfo" } }, "type": "object" @@ -46707,175 +49137,658 @@ }, "type": "object" }, - "type": "array" - }, - "kind": { - "default": "compute#metadata", - "description": "[Output Only] Type of the resource. Always compute#metadata for metadata.", - "type": "string" + "type": "array" + }, + "kind": { + "default": "compute#metadata", + "description": "[Output Only] Type of the resource. Always compute#metadata for metadata.", + "type": "string" + } + }, + "type": "object" + }, + "MetadataFilter": { + "description": "Opaque filter criteria used by load balancers to restrict routing configuration to a limited set of load balancing proxies. Proxies and sidecars involved in load balancing would typically present metadata to the load balancers that need to match criteria specified here. If a match takes place, the relevant configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. 
An example for using metadataFilters would be: if load balancing involves Envoys, they receive routing configuration when values in metadataFilters match values supplied in of their XDS requests to loadbalancers.", + "id": "MetadataFilter", + "properties": { + "filterLabels": { + "description": "The list of label value pairs that must match labels in the provided metadata based on filterMatchCriteria This list must not be empty and can have at the most 64 entries.", + "items": { + "$ref": "MetadataFilterLabelMatch" + }, + "type": "array" + }, + "filterMatchCriteria": { + "description": "Specifies how individual filter label matches within the list of filterLabels and contributes toward the overall metadataFilter match. Supported values are: - MATCH_ANY: at least one of the filterLabels must have a matching label in the provided metadata. - MATCH_ALL: all filterLabels must have matching labels in the provided metadata. ", + "enum": [ + "MATCH_ALL", + "MATCH_ANY", + "NOT_SET" + ], + "enumDescriptions": [ + "Specifies that all filterLabels must match for the metadataFilter to be considered a match.", + "Specifies that any filterLabel must match for the metadataFilter to be considered a match.", + "Indicates that the match criteria was not set. A metadataFilter must never be created with this value." + ], + "type": "string" + } + }, + "type": "object" + }, + "MetadataFilterLabelMatch": { + "description": "MetadataFilter label name value pairs that are expected to match corresponding labels presented as metadata to the load balancer.", + "id": "MetadataFilterLabelMatch", + "properties": { + "name": { + "description": "Name of metadata label. The name can have a maximum length of 1024 characters and must be at least 1 character long.", + "type": "string" + }, + "value": { + "description": "The value of the label must match the specified value. value can have a maximum length of 1024 characters.", + "type": "string" + } + }, + "type": "object" + }, + "NamedPort": { + "description": "The named port. For example: \u003c\"http\", 80\u003e.", + "id": "NamedPort", + "properties": { + "name": { + "description": "The name for this named port. The name must be 1-63 characters long, and comply with RFC1035.", + "type": "string" + }, + "port": { + "description": "The port number, which can be a value between 1 and 65535.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "Network": { + "description": "Represents a VPC Network resource. Networks connect resources to each other and to the internet. For more information, read Virtual Private Cloud (VPC) Network.", + "id": "Network", + "properties": { + "IPv4Range": { + "description": "Deprecated in favor of subnet mode networks. The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}", + "type": "string" + }, + "autoCreateSubnetworks": { + "description": "Must be set to create a VPC network. If not set, a legacy network is created. When set to true, the VPC network is created in auto mode. When set to false, the VPC network is created in custom mode. An auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges. 
For custom mode VPC networks, you can add subnets using the subnetworks insert method.", + "type": "boolean" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this field when you create the resource.", + "type": "string" + }, + "enableUlaInternalIpv6": { + "description": "Enable ULA internal ipv6 on this network. Enabling this feature will assign a /48 from google defined ULA prefix fd20::/20. .", + "type": "boolean" + }, + "firewallPolicy": { + "description": "[Output Only] URL of the firewall policy the network is associated with.", + "type": "string" + }, + "gatewayIPv4": { + "description": "[Output Only] The gateway address for default routing out of the network, selected by GCP.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "internalIpv6Range": { + "description": "When enabling ula internal ipv6, caller optionally can specify the /48 range they want from the google defined ULA prefix fd20::/20. The input must be a valid /48 ULA IPv6 address and must be within the fd20::/20. Operation will fail if the speficied /48 is already in used by another resource. If the field is not speficied, then a /48 range will be randomly allocated from fd20::/20 and returned via this field. .", + "type": "string" + }, + "kind": { + "default": "compute#network", + "description": "[Output Only] Type of the resource. Always compute#network for networks.", + "type": "string" + }, + "mtu": { + "description": "Maximum Transmission Unit in bytes. The minimum value for this field is 1300 and the maximum value is 8896. The suggested value is 1500, which is the default MTU used on the Internet, or 8896 if you want to use Jumbo frames. If unspecified, the value defaults to 1460.", + "format": "int32", + "type": "integer" + }, + "name": { + "annotations": { + "required": [ + "compute.networks.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "networkFirewallPolicyEnforcementOrder": { + "description": "The network firewall policy enforcement order. Can be either AFTER_CLASSIC_FIREWALL or BEFORE_CLASSIC_FIREWALL. Defaults to AFTER_CLASSIC_FIREWALL if the field is not specified.", + "enum": [ + "AFTER_CLASSIC_FIREWALL", + "BEFORE_CLASSIC_FIREWALL" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "peerings": { + "description": "[Output Only] A list of network peerings for the resource.", + "items": { + "$ref": "NetworkPeering" + }, + "type": "array" + }, + "routingConfig": { + "$ref": "NetworkRoutingConfig", + "description": "The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce." 
+ }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource with the resource id.", + "type": "string" + }, + "subnetworks": { + "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this VPC network.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "NetworkAttachment": { + "description": "NetworkAttachments A network attachment resource ...", + "id": "NetworkAttachment", + "properties": { + "connectionEndpoints": { + "description": "[Output Only] An array of connections for all the producers connected to this network attachment.", + "items": { + "$ref": "NetworkAttachmentConnectedEndpoint" + }, + "type": "array" + }, + "connectionPreference": { + "enum": [ + "ACCEPT_AUTOMATIC", + "ACCEPT_MANUAL", + "INVALID" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "fingerprint": { + "description": "[Output Only] Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", + "format": "byte", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource type. The server generates this identifier.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#networkAttachment", + "description": "[Output Only] Type of the resource.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.networkAttachments.insert" + ] + }, + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "network": { + "description": "[Output Only] The URL of the network which the Network Attachment belongs to.", + "type": "string" + }, + "producerAcceptLists": { + "description": "Projects that are allowed to connect to this network attachment. The project can be specified using its id or number.", + "items": { + "type": "string" + }, + "type": "array" + }, + "producerRejectLists": { + "description": "Projects that are not allowed to connect to this network attachment. The project can be specified using its id or number.", + "items": { + "type": "string" + }, + "type": "array" + }, + "region": { + "description": "[Output Only] URL of the region where the network attachment resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. 
It is not settable as a field in the request body.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "selfLinkWithId": { + "description": "[Output Only] Server-defined URL for this resource's resource id.", + "type": "string" + }, + "subnetworks": { + "description": "An array of URLs where each entry is the URL of a subnet provided by the service consumer to use for endpoints in the producers that connect to this network attachment.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "NetworkAttachmentAggregatedList": { + "description": "Contains a list of NetworkAttachmentsScopedList.", + "id": "NetworkAttachmentAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "NetworkAttachmentsScopedList", + "description": "Name of the scope containing this set of NetworkAttachments." + }, + "description": "A list of NetworkAttachmentsScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#networkAttachmentAggregatedList", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" }, - "MetadataFilter": { - "description": "Opaque filter criteria used by load balancers to restrict routing configuration to a limited set of load balancing proxies. Proxies and sidecars involved in load balancing would typically present metadata to the load balancers that need to match criteria specified here. If a match takes place, the relevant configuration is made available to those proxies. For each metadataFilter in this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the filterLabels must match the corresponding label provided in the metadata. If its filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match with corresponding labels provided in the metadata. 
An example for using metadataFilters would be: if load balancing involves Envoys, they receive routing configuration when values in metadataFilters match values supplied in of their XDS requests to loadbalancers.", - "id": "MetadataFilter", + "NetworkAttachmentConnectedEndpoint": { + "description": "[Output Only] A connection connected to this network attachment.", + "id": "NetworkAttachmentConnectedEndpoint", "properties": { - "filterLabels": { - "description": "The list of label value pairs that must match labels in the provided metadata based on filterMatchCriteria This list must not be empty and can have at the most 64 entries.", + "ipAddress": { + "description": "The IP address assigned to the producer instance network interface. This value will be a range in case of Serverless.", + "type": "string" + }, + "projectIdOrNum": { + "description": "The project id or number of the interface to which the IP was assigned.", + "type": "string" + }, + "secondaryIpCidrRanges": { + "description": "Alias IP ranges from the same subnetwork", "items": { - "$ref": "MetadataFilterLabelMatch" + "type": "string" }, "type": "array" }, - "filterMatchCriteria": { - "description": "Specifies how individual filter label matches within the list of filterLabels and contributes toward the overall metadataFilter match. Supported values are: - MATCH_ANY: at least one of the filterLabels must have a matching label in the provided metadata. - MATCH_ALL: all filterLabels must have matching labels in the provided metadata. ", + "status": { + "description": "The status of a connected endpoint to this network attachment.", "enum": [ - "MATCH_ALL", - "MATCH_ANY", - "NOT_SET" + "ACCEPTED", + "CLOSED", + "NEEDS_ATTENTION", + "PENDING", + "REJECTED", + "STATUS_UNSPECIFIED" ], "enumDescriptions": [ - "Specifies that all filterLabels must match for the metadataFilter to be considered a match.", - "Specifies that any filterLabel must match for the metadataFilter to be considered a match.", - "Indicates that the match criteria was not set. A metadataFilter must never be created with this value." + "The consumer allows traffic from the producer to reach its VPC.", + "The consumer network attachment no longer exists.", + "The consumer needs to take further action before traffic can be served.", + "The consumer neither allows nor prohibits traffic from the producer to reach its VPC.", + "The consumer prohibits traffic from the producer to reach its VPC.", + "" ], "type": "string" - } - }, - "type": "object" - }, - "MetadataFilterLabelMatch": { - "description": "MetadataFilter label name value pairs that are expected to match corresponding labels presented as metadata to the load balancer.", - "id": "MetadataFilterLabelMatch", - "properties": { - "name": { - "description": "Name of metadata label. The name can have a maximum length of 1024 characters and must be at least 1 character long.", - "type": "string" }, - "value": { - "description": "The value of the label must match the specified value. value can have a maximum length of 1024 characters.", - "type": "string" - } - }, - "type": "object" - }, - "NamedPort": { - "description": "The named port. For example: \u003c\"http\", 80\u003e.", - "id": "NamedPort", - "properties": { - "name": { - "description": "The name for this named port. 
The name must be 1-63 characters long, and comply with RFC1035.", + "subnetwork": { + "description": "The subnetwork used to assign the IP to the producer instance network interface.", "type": "string" - }, - "port": { - "description": "The port number, which can be a value between 1 and 65535.", - "format": "int32", - "type": "integer" } }, "type": "object" }, - "Network": { - "description": "Represents a VPC Network resource. Networks connect resources to each other and to the internet. For more information, read Virtual Private Cloud (VPC) Network.", - "id": "Network", + "NetworkAttachmentList": { + "id": "NetworkAttachmentList", "properties": { - "IPv4Range": { - "description": "Deprecated in favor of subnet mode networks. The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", - "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}", - "type": "string" - }, - "autoCreateSubnetworks": { - "description": "Must be set to create a VPC network. If not set, a legacy network is created. When set to true, the VPC network is created in auto mode. When set to false, the VPC network is created in custom mode. An auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges. For custom mode VPC networks, you can add subnets using the subnetworks insert method.", - "type": "boolean" - }, - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this field when you create the resource.", - "type": "string" - }, - "enableUlaInternalIpv6": { - "description": "Enable ULA internal ipv6 on this network. Enabling this feature will assign a /48 from google defined ULA prefix fd20::/20. .", - "type": "boolean" - }, - "firewallPolicy": { - "description": "[Output Only] URL of the firewall policy the network is associated with.", - "type": "string" - }, - "gatewayIPv4": { - "description": "[Output Only] The gateway address for default routing out of the network, selected by GCP.", - "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", - "type": "string" - }, "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", + "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, - "internalIpv6Range": { - "description": "When enabling ula internal ipv6, caller optionally can specify the /48 range they want from the google defined ULA prefix fd20::/20. The input must be a valid /48 ULA IPv6 address and must be within the fd20::/20. Operation will fail if the speficied /48 is already in used by another resource. If the field is not speficied, then a /48 range will be randomly allocated from fd20::/20 and returned via this field. .", - "type": "string" + "items": { + "description": "A list of NetworkAttachment resources.", + "items": { + "$ref": "NetworkAttachment" + }, + "type": "array" }, "kind": { - "default": "compute#network", - "description": "[Output Only] Type of the resource. Always compute#network for networks.", + "default": "compute#networkAttachmentList", "type": "string" }, - "mtu": { - "description": "Maximum Transmission Unit in bytes. The minimum value for this field is 1460 and the maximum value is 1500 bytes. 
If unspecified, defaults to 1460.", - "format": "int32", - "type": "integer" - }, - "name": { - "annotations": { - "required": [ - "compute.networks.insert" - ] - }, - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`. The first character must be a lowercase letter, and all following characters (except for the last character) must be a dash, lowercase letter, or digit. The last character must be a lowercase letter or digit.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", "type": "string" }, - "networkFirewallPolicyEnforcementOrder": { - "description": "The network firewall policy enforcement order. Can be either AFTER_CLASSIC_FIREWALL or BEFORE_CLASSIC_FIREWALL. Defaults to AFTER_CLASSIC_FIREWALL if the field is not specified.", - "enum": [ - "AFTER_CLASSIC_FIREWALL", - "BEFORE_CLASSIC_FIREWALL" - ], - "enumDescriptions": [ - "", - "" - ], + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, - "peerings": { - "description": "[Output Only] A list of network peerings for the resource.", + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "NetworkAttachmentsScopedList": { + "id": "NetworkAttachmentsScopedList", + "properties": { + "networkAttachments": { + "description": "A list of NetworkAttachments contained in this scope.", "items": { - "$ref": "NetworkPeering" + "$ref": "NetworkAttachment" }, "type": "array" }, - "routingConfig": { - "$ref": "NetworkRoutingConfig", - "description": "The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce." 
- }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "selfLinkWithId": { - "description": "[Output Only] Server-defined URL for this resource with the resource id.", - "type": "string" - }, - "subnetworks": { - "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this VPC network.", - "items": { - "type": "string" + "warning": { + "description": "Informational warning which replaces the list of network attachments when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. 
We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } }, - "type": "array" + "type": "object" } }, "type": "object" @@ -47270,6 +50183,9 @@ ], "type": "string" }, + "pscData": { + "$ref": "NetworkEndpointGroupPscData" + }, "pscTargetService": { "description": "The target service url used to set up private service connection to a Google API or a PSC Producer Service Attachment. An example value is: \"asia-northeast3-cloudkms.googleapis.com\"", "type": "string" @@ -47425,178 +50341,214 @@ }, "type": "object" }, - "NetworkEndpointGroupAppEngine": { - "description": "Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present. Note: App Engine service must be in the same project and located in the same region as the Serverless NEG.", - "id": "NetworkEndpointGroupAppEngine", - "properties": { - "service": { - "description": "Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: \"default\", \"my-service\".", - "type": "string" - }, - "urlMask": { - "description": "A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs \"foo1-dot-appname.appspot.com/v1\" and \"foo1-dot-appname.appspot.com/v2\" can be backed by the same Serverless NEG with URL mask \"\u003cservice\u003e-dot-appname.appspot.com/\u003cversion\u003e\". 
The URL mask will parse them to { service = \"foo1\", version = \"v1\" } and { service = \"foo1\", version = \"v2\" } respectively.", - "type": "string" - }, - "version": { - "description": "Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: \"v1\", \"v2\".", - "type": "string" - } - }, - "type": "object" - }, - "NetworkEndpointGroupCloudFunction": { - "description": "Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask. Note: Cloud Function must be in the same project and located in the same region as the Serverless NEG.", - "id": "NetworkEndpointGroupCloudFunction", + "NetworkEndpointGroupAppEngine": { + "description": "Configuration for an App Engine network endpoint group (NEG). The service is optional, may be provided explicitly or in the URL mask. The version is optional and can only be provided explicitly or in the URL mask when service is present. Note: App Engine service must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupAppEngine", + "properties": { + "service": { + "description": "Optional serving service. The service name is case-sensitive and must be 1-63 characters long. Example value: \"default\", \"my-service\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse service and version fields from a request URL. URL mask allows for routing to multiple App Engine services without having to create multiple Network Endpoint Groups and backend services. For example, the request URLs \"foo1-dot-appname.appspot.com/v1\" and \"foo1-dot-appname.appspot.com/v2\" can be backed by the same Serverless NEG with URL mask \"\u003cservice\u003e-dot-appname.appspot.com/\u003cversion\u003e\". The URL mask will parse them to { service = \"foo1\", version = \"v1\" } and { service = \"foo1\", version = \"v2\" } respectively.", + "type": "string" + }, + "version": { + "description": "Optional serving version. The version name is case-sensitive and must be 1-100 characters long. Example value: \"v1\", \"v2\".", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupCloudFunction": { + "description": "Configuration for a Cloud Function network endpoint group (NEG). The function must be provided explicitly or in the URL mask. Note: Cloud Function must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupCloudFunction", + "properties": { + "function": { + "description": "A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: \"func1\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. For example, request URLs \" mydomain.com/function1\" and \"mydomain.com/function2\" can be backed by the same Serverless NEG with URL mask \"/\u003cfunction\u003e\". The URL mask will parse them to { function = \"function1\" } and { function = \"function2\" } respectively.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupCloudRun": { + "description": "Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask. 
Note: Cloud Run service must be in the same project and located in the same region as the Serverless NEG.", + "id": "NetworkEndpointGroupCloudRun", + "properties": { + "service": { + "description": "Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: \"run-service\".", + "type": "string" + }, + "tag": { + "description": "Optional Cloud Run tag represents the \"named-revision\" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: \"revision-0010\".", + "type": "string" + }, + "urlMask": { + "description": "A template to parse \u003cservice\u003e and \u003ctag\u003e fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs \"foo1.domain.com/bar1\" and \"foo1.domain.com/bar2\" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask \"\u003ctag\u003e.domain.com/\u003cservice\u003e\". The URL mask will parse them to { service=\"bar1\", tag=\"foo1\" } and { service=\"bar2\", tag=\"foo2\" } respectively.", + "type": "string" + } + }, + "type": "object" + }, + "NetworkEndpointGroupList": { + "id": "NetworkEndpointGroupList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of NetworkEndpointGroup resources.", + "items": { + "$ref": "NetworkEndpointGroup" + }, + "type": "array" + }, + "kind": { + "default": "compute#networkEndpointGroupList", + "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupList for network endpoint group lists.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "NetworkEndpointGroupPscData": { + "description": "All data that is specifically relevant to only network endpoint groups of type PRIVATE_SERVICE_CONNECT.", + "id": "NetworkEndpointGroupPscData", "properties": { - "function": { - "description": "A user-defined name of the Cloud Function. The function name is case-sensitive and must be 1-63 characters long. Example value: \"func1\".", - "type": "string" - }, - "urlMask": { - "description": "A template to parse function field from a request URL. URL mask allows for routing to multiple Cloud Functions without having to create multiple Network Endpoint Groups and backend services. For example, request URLs \" mydomain.com/function1\" and \"mydomain.com/function2\" can be backed by the same Serverless NEG with URL mask \"/\u003cfunction\u003e\". The URL mask will parse them to { function = \"function1\" } and { function = \"function2\" } respectively.", - "type": "string" - } - }, - "type": "object" - }, - "NetworkEndpointGroupCloudRun": { - "description": "Configuration for a Cloud Run network endpoint group (NEG). The service must be provided explicitly or in the URL mask. The tag is optional, may be provided explicitly or in the URL mask. Note: Cloud Run service must be in the same project and located in the same region as the Serverless NEG.", - "id": "NetworkEndpointGroupCloudRun", - "properties": { - "service": { - "description": "Cloud Run service is the main resource of Cloud Run. The service must be 1-63 characters long, and comply with RFC1035. Example value: \"run-service\".", + "consumerPscAddress": { + "description": "[Output Only] Address allocated from given subnetwork for PSC. This IP address acts as a VIP for a PSC NEG, allowing it to act as an endpoint in L7 PSC-XLB.", "type": "string" }, - "tag": { - "description": "Optional Cloud Run tag represents the \"named-revision\" to provide additional fine-grained traffic routing information. The tag must be 1-63 characters long, and comply with RFC1035. Example value: \"revision-0010\".", + "pscConnectionId": { + "description": "[Output Only] The PSC connection id of the PSC Network Endpoint Group Consumer.", + "format": "uint64", "type": "string" }, - "urlMask": { - "description": "A template to parse \u003cservice\u003e and \u003ctag\u003e fields from a request URL. URL mask allows for routing to multiple Run services without having to create multiple network endpoint groups and backend services. For example, request URLs \"foo1.domain.com/bar1\" and \"foo1.domain.com/bar2\" can be backed by the same Serverless Network Endpoint Group (NEG) with URL mask \"\u003ctag\u003e.domain.com/\u003cservice\u003e\". 
The URL mask will parse them to { service=\"bar1\", tag=\"foo1\" } and { service=\"bar2\", tag=\"foo2\" } respectively.", + "pscConnectionStatus": { + "description": "[Output Only] The connection status of the PSC Forwarding Rule.", + "enum": [ + "ACCEPTED", + "CLOSED", + "NEEDS_ATTENTION", + "PENDING", + "REJECTED", + "STATUS_UNSPECIFIED" + ], + "enumDescriptions": [ + "The connection has been accepted by the producer.", + "The connection has been closed by the producer and will not serve traffic going forward.", + "The connection has been accepted by the producer, but the producer needs to take further action before the forwarding rule can serve traffic.", + "The connection is pending acceptance by the producer.", + "The connection has been rejected by the producer.", + "" + ], "type": "string" } }, "type": "object" }, - "NetworkEndpointGroupList": { - "id": "NetworkEndpointGroupList", - "properties": { - "id": { - "description": "[Output Only] Unique identifier for the resource; defined by the server.", - "type": "string" - }, - "items": { - "description": "A list of NetworkEndpointGroup resources.", - "items": { - "$ref": "NetworkEndpointGroup" - }, - "type": "array" - }, - "kind": { - "default": "compute#networkEndpointGroupList", - "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupList for network endpoint group lists.", - "type": "string" - }, - "nextPageToken": { - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for this resource.", - "type": "string" - }, - "warning": { - "description": "[Output Only] Informational warning message.", - "properties": { - "code": { - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DEPRECATED_TYPE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "EXPERIMENTAL_TYPE_USED", - "EXTERNAL_API_WARNING", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", - "LARGE_DEPLOYMENT_WARNING", - "MISSING_TYPE_DEPENDENCY", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "PARTIAL_SUCCESS", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SCHEMA_VALIDATION_IGNORED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNDECLARED_PROPERTIES", - "UNREACHABLE" - ], - "enumDescriptions": [ - "Warning about failed cleanup of transient changes made by a failed operation.", - "A link to a deprecated resource was created.", - "When deploying and at least one of the resources has a type marked as deprecated", - "The user created a boot disk that is larger than image size.", - "When deploying and at least one of the resources has a type marked as experimental", - "Warning that is present in an external api call", - "Warning that value of a field has been overridden. 
Deprecated unused field.", - "The operation involved use of an injected kernel, which is deprecated.", - "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", - "When deploying a deployment with a exceedingly large number of resources", - "A resource depends on a missing type", - "The route's nextHopIp address is not assigned to an instance on the network.", - "The route's next hop instance cannot ip forward.", - "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", - "The route's nextHopInstance URL refers to an instance that does not exist.", - "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", - "The route's next hop instance does not have a status of RUNNING.", - "Error which is not critical. We decided to continue the process despite the mentioned error.", - "No results are present on a particular list page.", - "Success is reported, but some results may be missing due to errors", - "The user attempted to use a resource that requires a TOS they have not accepted.", - "Warning that a resource is in use.", - "One or more of the resources set to auto-delete could not be deleted because they were in use.", - "When a resource schema validation is ignored.", - "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", - "When undeclared properties in the schema are present", - "A given scope cannot be reached." - ], - "type": "string" - }, - "data": { - "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", - "items": { - "properties": { - "key": { - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", - "type": "string" - }, - "value": { - "description": "[Output Only] A warning data value corresponding to the key.", - "type": "string" - } - }, - "type": "object" - }, - "type": "array" - }, - "message": { - "description": "[Output Only] A human-readable description of the warning code.", - "type": "string" - } - }, - "type": "object" - } - }, - "type": "object" - }, "NetworkEndpointGroupsAttachEndpointsRequest": { "id": "NetworkEndpointGroupsAttachEndpointsRequest", "properties": { @@ -47923,7 +50875,7 @@ "type": "string" }, "ipv6Address": { - "description": "An IPv6 internal network address for this network interface.", + "description": "An IPv6 internal network address for this network interface. To use a static internal IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork.", "type": "string" }, "kind": { @@ -47939,6 +50891,10 @@ "description": "URL of the VPC network resource for this instance. 
When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used. If the selected project doesn't have the default network, you must specify a network or subnet. If the network is not specified but the subnetwork is specified, the network is inferred. If you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/global/networks/ network - projects/project/global/networks/network - global/networks/default ", "type": "string" }, + "networkAttachment": { + "description": "The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}.", + "type": "string" + }, "networkIP": { "description": "An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system.", "type": "string" @@ -48379,6 +51335,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "shareSettings": { + "$ref": "ShareSettings", + "description": "Share-settings for the node group" + }, "size": { "description": "[Output Only] The total number of nodes in the node group.", "format": "int32", @@ -48710,6 +51670,10 @@ }, "type": "array" }, + "consumedResources": { + "$ref": "InstanceConsumptionInfo", + "description": "Node resources that are reserved by all instances." + }, "cpuOvercommitType": { "description": "CPU overcommit.", "enum": [ @@ -48731,6 +51695,13 @@ }, "type": "array" }, + "instanceConsumptionData": { + "description": "Instance data that shows consumed resources on the node.", + "items": { + "$ref": "InstanceConsumptionData" + }, + "type": "array" + }, "instances": { "description": "Instances scheduled on this node.", "items": { @@ -48774,6 +51745,10 @@ "" ], "type": "string" + }, + "totalResources": { + "$ref": "InstanceConsumptionInfo", + "description": "Total amount of available resources on the node." } }, "type": "object" @@ -49098,7 +52073,7 @@ }, "nodeTypeFlexibility": { "$ref": "NodeTemplateNodeTypeFlexibility", - "description": "The flexible properties of the desired node type. Node groups that use this node template will create nodes of a type that matches these properties. This field is mutually exclusive with the node_type property; you can only define one or the other, but not both." + "description": "Do not use. Instead, use the node_type property." }, "region": { "description": "[Output Only] The name of the region where the node template resides, such as us-central1.", @@ -49969,7 +52944,7 @@ }, "resendInterval": { "$ref": "Duration", - "description": "Optional. This field is used to configure how often to send a full update of all non-healthy backends. If unspecified, full updates are not sent. If specified, must be in the range between 600 seconds to 3600 seconds. Nanos are disallowed." + "description": "Optional. This field is used to configure how often to send a full update of all non-healthy backends. If unspecified, full updates are not sent. If specified, must be in the range between 600 seconds to 3600 seconds. Nanos are disallowed. Can only be set for regional notification endpoints." }, "retryDurationSec": { "description": "How much time (in seconds) is spent attempting notification retries until a successful response is received. 
Default is 30s. Limit is 20m (1200s). Must be a positive number.", @@ -50141,6 +53116,9 @@ }, "localizedMessage": { "$ref": "LocalizedMessage" + }, + "quotaInfo": { + "$ref": "QuotaExceededInfo" } }, "type": "object" @@ -50703,22 +53681,22 @@ "description": "The base time that a host is ejected for. The real ejection time is equal to the base ejection time multiplied by the number of times the host has been ejected. Defaults to 30000ms or 30s." }, "consecutiveErrors": { - "description": "Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5.", + "description": "Number of errors before a host is ejected from the connection pool. When the backend host is accessed over HTTP, a 5xx return code qualifies as an error. Defaults to 5. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, "consecutiveGatewayFailure": { - "description": "The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3.", + "description": "The number of consecutive gateway failures (502, 503, 504 status or connection errors that are mapped to one of those status codes) before a consecutive gateway failure ejection occurs. Defaults to 3. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, "enforcingConsecutiveErrors": { - "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0.", + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive 5xx. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 0. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, "enforcingConsecutiveGatewayFailure": { - "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.", + "description": "The percentage chance that a host will be actually ejected when an outlier status is detected through consecutive gateway failures. This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, @@ -51367,7 +54345,7 @@ "properties": { "defaultRouteAction": { "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. 
Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a path matcher's defaultRouteAction." + "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within a path matcher's defaultRouteAction." }, "defaultService": { "description": "The full or partial URL to the BackendService resource. This URL is used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: - https://www.googleapis.com/compute/v1/projects/project /global/backendServices/backendService - compute/v1/projects/project/global/backendServices/backendService - global/backendServices/backendService If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified. Only one of defaultService, defaultUrlRedirect , or defaultRouteAction.weightedBackendService must be set. Authorization requires one or more of the following Google IAM permissions on the specified resource default_service: - compute.backendBuckets.use - compute.backendServices.use ", @@ -51419,7 +54397,7 @@ }, "routeAction": { "$ref": "HttpRouteAction", - "description": "In response to a matching path, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of routeAction or urlRedirect must be set. URL maps for external HTTP(S) load balancers support only the urlRewrite action within a path rule's routeAction." + "description": "In response to a matching path, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of routeAction or urlRedirect must be set. URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within a path rule's routeAction." }, "service": { "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. 
However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set.", @@ -51649,6 +54627,22 @@ "$ref": "UsageExportLocation", "description": "The naming prefix for daily usage reports and the Google Cloud Storage bucket where they are stored." }, + "vmDnsSetting": { + "description": "[Output Only] Default internal DNS setting used by VMs running in this project.", + "enum": [ + "GLOBAL_DEFAULT", + "UNSPECIFIED_VM_DNS_SETTING", + "ZONAL_DEFAULT", + "ZONAL_ONLY" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, "xpnProjectStatus": { "description": "[Output Only] The role this project has in a shared VPC configuration. Currently, only projects with the host role, which is specified by the value HOST, are differentiated.", "enum": [ @@ -52496,8 +55490,12 @@ "EXTERNAL_VPN_GATEWAYS", "FIREWALLS", "FORWARDING_RULES", + "GLOBAL_EXTERNAL_MANAGED_BACKEND_SERVICES", "GLOBAL_EXTERNAL_MANAGED_FORWARDING_RULES", + "GLOBAL_EXTERNAL_PROXY_LB_BACKEND_SERVICES", "GLOBAL_INTERNAL_ADDRESSES", + "GLOBAL_INTERNAL_MANAGED_BACKEND_SERVICES", + "GLOBAL_INTERNAL_TRAFFIC_DIRECTOR_BACKEND_SERVICES", "GPUS_ALL_REGIONS", "HEALTH_CHECKS", "IMAGES", @@ -52524,6 +55522,7 @@ "N2D_CPUS", "N2_CPUS", "NETWORKS", + "NETWORK_ATTACHMENTS", "NETWORK_ENDPOINT_GROUPS", "NETWORK_FIREWALL_POLICIES", "NODE_GROUPS", @@ -52557,7 +55556,11 @@ "PUBLIC_ADVERTISED_PREFIXES", "PUBLIC_DELEGATED_PREFIXES", "REGIONAL_AUTOSCALERS", + "REGIONAL_EXTERNAL_MANAGED_BACKEND_SERVICES", + "REGIONAL_EXTERNAL_NETWORK_LB_BACKEND_SERVICES", "REGIONAL_INSTANCE_GROUP_MANAGERS", + "REGIONAL_INTERNAL_LB_BACKEND_SERVICES", + "REGIONAL_INTERNAL_MANAGED_BACKEND_SERVICES", "RESERVATIONS", "RESOURCE_POLICIES", "ROUTERS", @@ -52573,6 +55576,7 @@ "SSL_CERTIFICATES", "STATIC_ADDRESSES", "STATIC_BYOIP_ADDRESSES", + "STATIC_EXTERNAL_IPV6_ADDRESS_RANGES", "SUBNETWORKS", "T2A_CPUS", "T2D_CPUS", @@ -52702,6 +55706,15 @@ "", "", "", + "", + "", + "", + "", + "", + "", + "", + "", + "", "The total number of snapshots allowed for a single project.", "", "", @@ -52720,6 +55733,7 @@ "", "", "", + "", "" ], "type": "string" @@ -52736,6 +55750,33 @@ }, "type": "object" }, + "QuotaExceededInfo": { + "description": "Additional details for quota exceeded error for resource quota.", + "id": "QuotaExceededInfo", + "properties": { + "dimensions": { + "additionalProperties": { + "type": "string" + }, + "description": "The map holding related quota dimensions.", + "type": "object" + }, + "limit": { + "description": "Current effective quota limit. The limit's unit depends on the quota type or metric.", + "format": "double", + "type": "number" + }, + "limitName": { + "description": "The name of the quota limit.", + "type": "string" + }, + "metricName": { + "description": "The Compute Engine quota metric name.", + "type": "string" + } + }, + "type": "object" + }, "Reference": { "description": "Represents a reference to a resource.", "id": "Reference", @@ -54107,6 +57148,17 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "resourcePolicies": { + "additionalProperties": { + "type": "string" + }, + "description": "Resource policies to be added to this reservation. The key is defined by user, and the value is resource policy url. 
This is to define placement policy with reservation.", + "type": "object" + }, + "resourceStatus": { + "$ref": "AllocationResourceStatus", + "description": "[Output Only] Status information for Reservation resource." + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -54117,7 +57169,7 @@ }, "shareSettings": { "$ref": "ShareSettings", - "description": "Share-settings for shared-reservation" + "description": "Specify share-settings to create a shared reservation. This property is optional. For more information about the syntax and options for this field and its subfields, see the guide for creating a shared reservation." }, "specificReservation": { "$ref": "AllocationSpecificSKUReservation", @@ -54560,7 +57612,7 @@ "type": "string" }, "type": { - "description": "Type of resource for which this commitment applies. Possible values are VCPU and MEMORY", + "description": "Type of resource for which this commitment applies. Possible values are VCPU, MEMORY, LOCAL_SSD, and ACCELERATOR.", "enum": [ "ACCELERATOR", "LOCAL_SSD", @@ -54980,7 +58032,7 @@ "type": "string" }, "timeZone": { - "description": "Specifies the time zone to be used in interpreting Schedule.schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.", + "description": "Specifies the time zone to be used in interpreting Schedule.schedule. The value of this field must be a time zone name from the tz database: https://wikipedia.org/wiki/Tz_database.", "type": "string" }, "vmStartSchedule": { @@ -55294,6 +58346,17 @@ }, "type": "object" }, + "ResourceStatus": { + "description": "Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls.", + "id": "ResourceStatus", + "properties": { + "physicalHost": { + "description": "[Output Only] An opaque ID of the host on which the VM is running.", + "type": "string" + } + }, + "type": "object" + }, "Route": { "description": "Represents a Route resource. A route defines a path from VM instances in the VPC network to a specific destination. This destination can be inside or outside the VPC network. For more information, read the Routes overview.", "id": "Route", @@ -55706,7 +58769,7 @@ "type": "string" }, "encryptedInterconnectRouter": { - "description": "Indicates if a router is dedicated for use with encrypted VLAN attachments (interconnectAttachments). Not currently available publicly. ", + "description": "Indicates if a router is dedicated for use with encrypted VLAN attachments (interconnectAttachments).", "type": "boolean" }, "id": { @@ -55726,6 +58789,13 @@ "description": "[Output Only] Type of resource. Always compute#router for routers.", "type": "string" }, + "md5AuthenticationKeys": { + "description": "Keys used for MD5 authentication.", + "items": { + "$ref": "RouterMd5AuthenticationKey" + }, + "type": "array" + }, "name": { "annotations": { "required": [ @@ -55971,7 +59041,7 @@ "type": "string" }, "advertisedGroups": { - "description": "User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: - ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the \"bgp\" message). 
These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", + "description": "User-specified list of prefix groups to advertise in custom mode, which currently supports the following option: - ALL_SUBNETS: Advertises all of the router's own VPC subnets. This excludes any routes learned for subnets that use VPC Network Peering. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the \"bgp\" message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", "items": { "enum": [ "ALL_SUBNETS" @@ -56039,6 +59109,10 @@ ], "type": "string" }, + "md5AuthenticationKeyName": { + "description": "Present if MD5 authentication is enabled for the peering. Must be the name of one of the entries in the Router.md5_authentication_keys. The field must comply with RFC1035.", + "type": "string" + }, "name": { "annotations": { "required": [ @@ -56282,6 +59356,31 @@ }, "type": "object" }, + "RouterMd5AuthenticationKey": { + "id": "RouterMd5AuthenticationKey", + "properties": { + "key": { + "annotations": { + "required": [ + "compute.routers.insert" + ] + }, + "description": "[Input only] Value of the key. For patch and update calls, it can be skipped to copy the value from the previous configuration. This is allowed if the key with the same name existed before the operation. Maximum length is 80 characters. Can only contain printable ASCII characters.", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "compute.routers.insert", + "compute.routers.update" + ] + }, + "description": "Name used to identify the key. Must be unique within a router. Must be referenced by at least one bgpPeer. Must comply with RFC1035.", + "type": "string" + } + }, + "type": "object" + }, "RouterNat": { "description": "Represents a Nat resource. It enables the VMs within the specified subnetworks to access Internet without external IP addresses. It specifies a list of subnetworks (and the ranges within) that want to use NAT. Customers can also provide the external IPs that would be used for NAT. GCP would auto-allocate ephemeral IPs if no external IPs are provided.", "id": "RouterNat", @@ -56561,14 +59660,26 @@ "bfdStatus": { "$ref": "BfdStatus" }, + "enableIpv6": { + "description": "Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default.", + "type": "boolean" + }, "ipAddress": { "description": "IP address of the local BGP interface.", "type": "string" }, + "ipv6NexthopAddress": { + "description": "IPv6 address of the local BGP interface.", + "type": "string" + }, "linkedVpnTunnel": { "description": "URL of the VPN tunnel that this BGP peer controls.", "type": "string" }, + "md5AuthEnabled": { + "description": "Informs whether MD5 authentication is enabled on this BGP peer.", + "type": "boolean" + }, "name": { "description": "Name of this BGP peer. Unique within the Routers resource.", "type": "string" @@ -56582,6 +59693,10 @@ "description": "IP address of the remote BGP interface.", "type": "string" }, + "peerIpv6NexthopAddress": { + "description": "IPv6 address of the remote BGP interface.", + "type": "string" + }, "routerApplianceInstance": { "description": "[Output only] URI of the VM instance that is used as third-party router appliances such as Next Gen Firewalls, Virtual Routers, or Router Appliances. 
The VM instance is the peer side of the BGP session.", "type": "string" @@ -56604,6 +59719,18 @@ ], "type": "string" }, + "statusReason": { + "description": "Indicates why particular status was returned.", + "enum": [ + "MD5_AUTH_INTERNAL_PROBLEM", + "STATUS_REASON_UNSPECIFIED" + ], + "enumDescriptions": [ + "Indicates internal problems with configuration of MD5 authentication. This particular reason can only be returned when md5AuthEnabled is true and status is DOWN.", + "" + ], + "type": "string" + }, "uptime": { "description": "Time this session has been up. Format: 14 years, 51 weeks, 6 days, 23 hours, 59 minutes, 59 seconds", "type": "string" @@ -56910,25 +60037,25 @@ "id": "SSLHealthCheck", "properties": { "port": { - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", + "description": "The TCP port number to which the health check prober sends packets. The default value is 443. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "description": "Not supported.", "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, SSL health check follows behavior specified in port and portName fields.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. 
For other backends, the port or named port specified in the Backend Service is used for health checking." + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." ], "type": "string" }, @@ -56945,11 +60072,11 @@ "type": "string" }, "request": { - "description": "The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.", + "description": "Instructs the health check prober to send this exact ASCII string, up to 1024 bytes in length, after establishing the TCP connection and SSL handshake.", "type": "string" }, "response": { - "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.", + "description": "Creates a content-based SSL health check. In addition to establishing a TCP connection and the TLS handshake, you can configure the health check to pass only when the backend sends this exact response ASCII string, up to 1024 bytes in length. For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-ssl-tcp", "type": "string" } }, @@ -57561,7 +60688,7 @@ "type": "string" }, "rules": { - "description": "A list of rules that belong to this policy. There must always be a default rule (rule with priority 2147483647 and match \"*\"). If no rules are provided when creating a security policy, a default rule with action \"allow\" will be added.", + "description": "A list of rules that belong to this policy. There must always be a default rule which is a rule with priority 2147483647 and match all condition (for the match condition this means match \"*\" for srcIpRanges and for the networkMatch condition every field must be either match \"*\" or not set). If no rules are provided when creating a security policy, a default rule with action \"allow\" will be added.", "items": { "$ref": "SecurityPolicyRule" }, @@ -57572,7 +60699,7 @@ "type": "string" }, "type": { - "description": "The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. This field can be set only at resource creation time.", + "description": "The type indicates the intended use of the security policy. 
- CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time.", "enum": [ "CLOUD_ARMOR", "CLOUD_ARMOR_EDGE", @@ -57625,6 +60752,10 @@ "SecurityPolicyAdvancedOptionsConfig": { "id": "SecurityPolicyAdvancedOptionsConfig", "properties": { + "jsonCustomConfig": { + "$ref": "SecurityPolicyAdvancedOptionsConfigJsonCustomConfig", + "description": "Custom configuration to apply the JSON parsing. Only applicable when json_parsing is set to STANDARD." + }, "jsonParsing": { "enum": [ "DISABLED", @@ -57650,6 +60781,19 @@ }, "type": "object" }, + "SecurityPolicyAdvancedOptionsConfigJsonCustomConfig": { + "id": "SecurityPolicyAdvancedOptionsConfigJsonCustomConfig", + "properties": { + "contentTypes": { + "description": "A list of custom Content-Type header values to apply the JSON parsing. As per RFC 1341, a Content-Type header value has the following format: Content-Type := type \"/\" subtype *[\";\" parameter] When configuring a custom Content-Type header value, only the type/subtype needs to be specified, and the parameters should be excluded.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "SecurityPolicyDdosProtectionConfig": { "id": "SecurityPolicyDdosProtectionConfig", "properties": { @@ -57806,7 +60950,7 @@ "id": "SecurityPolicyRule", "properties": { "action": { - "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(): deny access to target, returns the HTTP response code specified (valid values are 403, 404, and 502). - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", + "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. 
- rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", "type": "string" }, "description": { @@ -57826,6 +60970,10 @@ "$ref": "SecurityPolicyRuleMatcher", "description": "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced." }, + "preconfiguredWafConfig": { + "$ref": "SecurityPolicyRulePreconfiguredWafConfig", + "description": "Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect." + }, "preview": { "description": "If set to true, the specified action is not enforced.", "type": "boolean" @@ -57911,6 +61059,92 @@ }, "type": "object" }, + "SecurityPolicyRulePreconfiguredWafConfig": { + "id": "SecurityPolicyRulePreconfiguredWafConfig", + "properties": { + "exclusions": { + "description": "A list of exclusions to apply during preconfigured WAF evaluation.", + "items": { + "$ref": "SecurityPolicyRulePreconfiguredWafConfigExclusion" + }, + "type": "array" + } + }, + "type": "object" + }, + "SecurityPolicyRulePreconfiguredWafConfigExclusion": { + "id": "SecurityPolicyRulePreconfiguredWafConfigExclusion", + "properties": { + "requestCookiesToExclude": { + "description": "A list of request cookie names whose value will be excluded from inspection during preconfigured WAF evaluation.", + "items": { + "$ref": "SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams" + }, + "type": "array" + }, + "requestHeadersToExclude": { + "description": "A list of request header names whose value will be excluded from inspection during preconfigured WAF evaluation.", + "items": { + "$ref": "SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams" + }, + "type": "array" + }, + "requestQueryParamsToExclude": { + "description": "A list of request query parameter names whose value will be excluded from inspection during preconfigured WAF evaluation. Note that the parameter can be in the query string or in the POST body.", + "items": { + "$ref": "SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams" + }, + "type": "array" + }, + "requestUrisToExclude": { + "description": "A list of request URIs from the request line to be excluded from inspection during preconfigured WAF evaluation. When specifying this field, the query or fragment part should be excluded.", + "items": { + "$ref": "SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams" + }, + "type": "array" + }, + "targetRuleIds": { + "description": "A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. 
If omitted, it refers to all the rule IDs under the WAF rule set.", + "items": { + "type": "string" + }, + "type": "array" + }, + "targetRuleSet": { + "description": "Target WAF rule set to apply the preconfigured WAF exclusion.", + "type": "string" + } + }, + "type": "object" + }, + "SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams": { + "id": "SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams", + "properties": { + "op": { + "description": "The match operator for the field.", + "enum": [ + "CONTAINS", + "ENDS_WITH", + "EQUALS", + "EQUALS_ANY", + "STARTS_WITH" + ], + "enumDescriptions": [ + "The operator matches if the field value contains the specified value.", + "The operator matches if the field value ends with the specified value.", + "The operator matches if the field value equals the specified value.", + "The operator matches if the field value is any value.", + "The operator matches if the field value starts with the specified value." + ], + "type": "string" + }, + "val": { + "description": "The value of the field.", + "type": "string" + } + }, + "type": "object" + }, "SecurityPolicyRuleRateLimitOptions": { "id": "SecurityPolicyRuleRateLimitOptions", "properties": { @@ -57928,15 +61162,21 @@ "type": "string" }, "enforceOnKey": { - "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if this field 'enforce_on_key' is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforce_on_key_name\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforce_on_key_name\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. 
- HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", "enum": [ "ALL", "HTTP_COOKIE", "HTTP_HEADER", + "HTTP_PATH", "IP", + "REGION_CODE", + "SNI", "XFF_IP" ], "enumDescriptions": [ + "", + "", + "", "", "", "", @@ -57950,7 +61190,7 @@ "type": "string" }, "exceedAction": { - "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are \"deny(status)\", where valid values for status are 403, 404, 429, and 502, and \"redirect\" where the redirect parameters come from exceedRedirectOptions below.", + "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below.", "type": "string" }, "exceedRedirectOptions": { @@ -58372,6 +61612,10 @@ "format": "uint32", "type": "integer" }, + "networkUrl": { + "description": "The network URL for the network to set the limit for.", + "type": "string" + }, "projectIdOrNum": { "description": "The project id or number for the project to set the limit for.", "type": "string" @@ -58615,11 +61859,13 @@ "description": "Type of sharing for this shared-reservation", "enum": [ "LOCAL", + "ORGANIZATION", "SHARE_TYPE_UNSPECIFIED", "SPECIFIC_PROJECTS" ], "enumDescriptions": [ "Default value.", + "Shared-reservation is open to entire Organization", "Default value. This value is unused.", "Shared-reservation is open to specific projects" ], @@ -59048,7 +62294,7 @@ "id": "SourceInstanceParams", "properties": { "diskConfigs": { - "description": "Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, new custom images will be created from each disk. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes.", + "description": "Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, the source images for each disk will be used. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes.", "items": { "$ref": "DiskInstantiationConfig" }, @@ -59647,6 +62893,136 @@ }, "type": "object" }, + "SslPoliciesAggregatedList": { + "id": "SslPoliciesAggregatedList", + "properties": { + "etag": { + "type": "string" + }, + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "SslPoliciesScopedList", + "description": "Name of the scope containing this set of SSL policies." + }, + "description": "A list of SslPoliciesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#sslPoliciesAggregatedList", + "description": "[Output Only] Type of resource. 
Always compute#sslPolicyAggregatedList for lists of SSL Policies.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. 
We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "SslPoliciesList": { "id": "SslPoliciesList", "properties": { @@ -59766,18 +63142,120 @@ }, "type": "object" }, - "SslPoliciesListAvailableFeaturesResponse": { - "id": "SslPoliciesListAvailableFeaturesResponse", - "properties": { - "features": { - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, + "SslPoliciesListAvailableFeaturesResponse": { + "id": "SslPoliciesListAvailableFeaturesResponse", + "properties": { + "features": { + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "SslPoliciesScopedList": { + "id": "SslPoliciesScopedList", + "properties": { + "sslPolicies": { + "description": "A list of SslPolicies contained in this scope.", + "items": { + "$ref": "SslPolicy" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of SSL policies when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "SslPolicy": { "description": "Represents an SSL Policy resource. Use SSL policies to control the SSL features, such as versions and cipher suites, offered by an HTTPS or SSL Proxy load balancer. For more information, read SSL Policy Concepts.", "id": "SslPolicy", @@ -59854,6 +63332,10 @@ ], "type": "string" }, + "region": { + "description": "[Output Only] URL of the region where the regional SSL policy resides. This field is not applicable to global SSL policies.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -60021,7 +63503,7 @@ "type": "boolean" }, "externalIpv6Prefix": { - "description": "[Output Only] The external IPv6 address range that is assigned to this subnetwork.", + "description": "The external IPv6 address range that is owned by this subnetwork.", "type": "string" }, "fingerprint": { @@ -60636,25 +64118,25 @@ "id": "TCPHealthCheck", "properties": { "port": { - "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.", + "description": "The TCP port number to which the health check prober sends packets. The default value is 80. Valid values are 1 through 65535.", "format": "int32", "type": "integer" }, "portName": { - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence.", + "description": "Not supported.", "type": "string" }, "portSpecification": { - "description": "Specifies how port is selected for health checking, can be one of following values: USE_FIXED_PORT: The port number in port is used for health checking. USE_NAMED_PORT: The portName is used for health checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking. If not specified, TCP health check follows behavior specified in port and portName fields.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. 
Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT" ], "enumDescriptions": [ - "The port number in port is used for health checking.", - "The portName is used for health checking.", - "For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking." + "The port number in the health check's port is used for health checking. Applies to network endpoint group and instance group backends.", + "Not supported.", + "For network endpoint group backends, the health check uses the port number specified on each endpoint in the network endpoint group. For instance group backends, the health check uses the port number specified for the backend service's named port defined in the instance group's named ports." ], "type": "string" }, @@ -60671,11 +64153,11 @@ "type": "string" }, "request": { - "description": "The application data to send once the TCP connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII.", + "description": "Instructs the health check prober to send this exact ASCII string, up to 1024 bytes in length, after establishing the TCP connection.", "type": "string" }, "response": { - "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII.", + "description": "Creates a content-based TCP health check. In addition to establishing a TCP connection, you can configure the health check to pass only when the backend sends this exact response ASCII string, up to 1024 bytes in length. 
For details, see: https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-ssl-tcp", "type": "string" } }, @@ -62575,37 +66057,365 @@ }, "type": "object" }, - "TargetReference": { - "id": "TargetReference", + "TargetReference": { + "id": "TargetReference", + "properties": { + "target": { + "type": "string" + } + }, + "type": "object" + }, + "TargetSslProxiesSetBackendServiceRequest": { + "id": "TargetSslProxiesSetBackendServiceRequest", + "properties": { + "service": { + "description": "The URL of the new BackendService resource for the targetSslProxy.", + "type": "string" + } + }, + "type": "object" + }, + "TargetSslProxiesSetCertificateMapRequest": { + "id": "TargetSslProxiesSetCertificateMapRequest", + "properties": { + "certificateMap": { + "description": "URL of the Certificate Map to associate with this TargetSslProxy.", + "type": "string" + } + }, + "type": "object" + }, + "TargetSslProxiesSetProxyHeaderRequest": { + "id": "TargetSslProxiesSetProxyHeaderRequest", + "properties": { + "proxyHeader": { + "description": "The new type of proxy header to append before sending data to the backend. NONE or PROXY_V1 are allowed.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "TargetSslProxiesSetSslCertificatesRequest": { + "id": "TargetSslProxiesSetSslCertificatesRequest", + "properties": { + "sslCertificates": { + "description": "New set of URLs to SslCertificate resources to associate with this TargetSslProxy. At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "TargetSslProxy": { + "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a SSL Proxy load balancer. Global forwarding rules reference a target SSL proxy, and the target proxy then references an external backend service. For more information, read Using Target Proxies.", + "id": "TargetSslProxy", + "properties": { + "certificateMap": { + "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", + "type": "string" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#targetSslProxy", + "description": "[Output Only] Type of the resource. Always compute#targetSslProxy for target SSL proxies.", + "type": "string" + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "proxyHeader": { + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "service": { + "description": "URL to the BackendService resource.", + "type": "string" + }, + "sslCertificates": { + "description": "URLs to SslCertificate resources that are used to authenticate connections to Backends. At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED.", + "items": { + "type": "string" + }, + "type": "array" + }, + "sslPolicy": { + "description": "URL of SslPolicy resource that will be associated with the TargetSslProxy resource. If not set, the TargetSslProxy resource will not have any SSL policy configured.", + "type": "string" + } + }, + "type": "object" + }, + "TargetSslProxyList": { + "description": "Contains a list of TargetSslProxy resources.", + "id": "TargetSslProxyList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of TargetSslProxy resources.", + "items": { + "$ref": "TargetSslProxy" + }, + "type": "array" + }, + "kind": { + "default": "compute#targetSslProxyList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "TargetTcpProxiesScopedList": { + "id": "TargetTcpProxiesScopedList", "properties": { - "target": { - "type": "string" + "targetTcpProxies": { + "description": "A list of TargetTcpProxies contained in this scope.", + "items": { + "$ref": "TargetTcpProxy" + }, + "type": "array" + }, + "warning": { + "description": "Informational warning which replaces the list of backend services when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" } }, "type": "object" }, - "TargetSslProxiesSetBackendServiceRequest": { - "id": "TargetSslProxiesSetBackendServiceRequest", + "TargetTcpProxiesSetBackendServiceRequest": { + "id": "TargetTcpProxiesSetBackendServiceRequest", "properties": { "service": { - "description": "The URL of the new BackendService resource for the targetSslProxy.", - "type": "string" - } - }, - "type": "object" - }, - "TargetSslProxiesSetCertificateMapRequest": { - "id": "TargetSslProxiesSetCertificateMapRequest", - "properties": { - "certificateMap": { - "description": "URL of the Certificate Map to associate with this TargetSslProxy.", + "description": "The URL of the new BackendService resource for the targetTcpProxy.", "type": "string" } }, "type": "object" }, - "TargetSslProxiesSetProxyHeaderRequest": { - "id": "TargetSslProxiesSetProxyHeaderRequest", + "TargetTcpProxiesSetProxyHeaderRequest": { + "id": "TargetTcpProxiesSetProxyHeaderRequest", "properties": { "proxyHeader": { "description": "The new type of proxy header to append before sending data to the backend. NONE or PROXY_V1 are allowed.", @@ -62622,27 +66432,10 @@ }, "type": "object" }, - "TargetSslProxiesSetSslCertificatesRequest": { - "id": "TargetSslProxiesSetSslCertificatesRequest", - "properties": { - "sslCertificates": { - "description": "New set of URLs to SslCertificate resources to associate with this TargetSslProxy. At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "TargetSslProxy": { - "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a SSL Proxy load balancer. Global forwarding rules reference a target SSL proxy, and the target proxy then references an external backend service. For more information, read Using Target Proxies.", - "id": "TargetSslProxy", + "TargetTcpProxy": { + "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a TCP Proxy load balancer. Global forwarding rules reference target TCP proxy, and the target proxy then references an external backend service. For more information, read TCP Proxy Load Balancing overview.", + "id": "TargetTcpProxy", "properties": { - "certificateMap": { - "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", - "type": "string" - }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -62657,8 +66450,8 @@ "type": "string" }, "kind": { - "default": "compute#targetSslProxy", - "description": "[Output Only] Type of the resource. Always compute#targetSslProxy for target SSL proxies.", + "default": "compute#targetTcpProxy", + "description": "[Output Only] Type of the resource. 
Always compute#targetTcpProxy for target TCP proxies.", "type": "string" }, "name": { @@ -62666,6 +66459,10 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "proxyBind": { + "description": "This field only applies when the forwarding rule that references this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. When this field is set to true, Envoy proxies set up inbound traffic interception and bind to the IP address and port specified in the forwarding rule. This is generally useful when using Traffic Director to configure Envoy as a gateway or middle proxy (in other words, not a sidecar proxy). The Envoy proxy listens for inbound requests and handles requests when it receives them. The default is false.", + "type": "boolean" + }, "proxyHeader": { "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", "enum": [ @@ -62678,6 +66475,10 @@ ], "type": "string" }, + "region": { + "description": "[Output Only] URL of the region where the regional TCP proxy resides. This field is not applicable to global TCP proxy.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -62685,39 +66486,28 @@ "service": { "description": "URL to the BackendService resource.", "type": "string" - }, - "sslCertificates": { - "description": "URLs to SslCertificate resources that are used to authenticate connections to Backends. At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED.", - "items": { - "type": "string" - }, - "type": "array" - }, - "sslPolicy": { - "description": "URL of SslPolicy resource that will be associated with the TargetSslProxy resource. If not set, the TargetSslProxy resource will not have any SSL policy configured.", - "type": "string" } }, "type": "object" }, - "TargetSslProxyList": { - "description": "Contains a list of TargetSslProxy resources.", - "id": "TargetSslProxyList", + "TargetTcpProxyAggregatedList": { + "id": "TargetTcpProxyAggregatedList", "properties": { "id": { "description": "[Output Only] Unique identifier for the resource; defined by the server.", "type": "string" }, "items": { - "description": "A list of TargetSslProxy resources.", - "items": { - "$ref": "TargetSslProxy" + "additionalProperties": { + "$ref": "TargetTcpProxiesScopedList", + "description": "Name of the scope containing this set of TargetTcpProxies." }, - "type": "array" + "description": "A list of TargetTcpProxiesScopedList resources.", + "type": "object" }, "kind": { - "default": "compute#targetSslProxyList", - "description": "Type of resource.", + "default": "compute#targetTcpProxyAggregatedList", + "description": "[Output Only] Type of resource. 
Always compute#targetTcpProxyAggregatedList for lists of Target TCP Proxies.", "type": "string" }, "nextPageToken": { @@ -62728,6 +66518,13 @@ "description": "[Output Only] Server-defined URL for this resource.", "type": "string" }, + "unreachables": { + "description": "[Output Only] Unreachable resources.", + "items": { + "type": "string" + }, + "type": "array" + }, "warning": { "description": "[Output Only] Informational warning message.", "properties": { @@ -62820,88 +66617,6 @@ }, "type": "object" }, - "TargetTcpProxiesSetBackendServiceRequest": { - "id": "TargetTcpProxiesSetBackendServiceRequest", - "properties": { - "service": { - "description": "The URL of the new BackendService resource for the targetTcpProxy.", - "type": "string" - } - }, - "type": "object" - }, - "TargetTcpProxiesSetProxyHeaderRequest": { - "id": "TargetTcpProxiesSetProxyHeaderRequest", - "properties": { - "proxyHeader": { - "description": "The new type of proxy header to append before sending data to the backend. NONE or PROXY_V1 are allowed.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "TargetTcpProxy": { - "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a TCP Proxy load balancer. Global forwarding rules reference target TCP proxy, and the target proxy then references an external backend service. For more information, read TCP Proxy Load Balancing overview.", - "id": "TargetTcpProxy", - "properties": { - "creationTimestamp": { - "description": "[Output Only] Creation timestamp in RFC3339 text format.", - "type": "string" - }, - "description": { - "description": "An optional description of this resource. Provide this property when you create the resource.", - "type": "string" - }, - "id": { - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64", - "type": "string" - }, - "kind": { - "default": "compute#targetTcpProxy", - "description": "[Output Only] Type of the resource. Always compute#targetTcpProxy for target TCP proxies.", - "type": "string" - }, - "name": { - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "type": "string" - }, - "proxyBind": { - "description": "This field only applies when the forwarding rule that references this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. When this field is set to true, Envoy proxies set up inbound traffic interception and bind to the IP address and port specified in the forwarding rule. This is generally useful when using Traffic Director to configure Envoy as a gateway or middle proxy (in other words, not a sidecar proxy). The Envoy proxy listens for inbound requests and handles requests when it receives them. The default is false.", - "type": "boolean" - }, - "proxyHeader": { - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. 
The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ], - "type": "string" - }, - "selfLink": { - "description": "[Output Only] Server-defined URL for the resource.", - "type": "string" - }, - "service": { - "description": "URL to the BackendService resource.", - "type": "string" - } - }, - "type": "object" - }, "TargetTcpProxyList": { "description": "Contains a list of TargetTcpProxy resources.", "id": "TargetTcpProxyList", @@ -63550,7 +67265,7 @@ }, "defaultRouteAction": { "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. UrlMaps for external HTTP(S) load balancers support only the urlRewrite action within defaultRouteAction. defaultRouteAction has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." + "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within defaultRouteAction. defaultRouteAction has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." }, "defaultService": { "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of defaultService, defaultUrlRedirect , or defaultRouteAction.weightedBackendService must be set. defaultService has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true.", @@ -64589,7 +68304,7 @@ "type": "string" }, "labelFingerprint": { - "description": "A fingerprint for the labels being applied to this VpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an VpnGateway.", + "description": "A fingerprint for the labels being applied to this VpnGateway, which is essentially a hash of the labels set used for optimistic locking. 
The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a VpnGateway.", "format": "byte", "type": "string" }, @@ -64628,7 +68343,7 @@ "type": "string" }, "stackType": { - "description": "The stack type for this VPN gateway to identify the IP protocols that are enabled. If not specified, IPV4_ONLY will be used.", + "description": "The stack type for this VPN gateway to identify the IP protocols that are enabled. Possible values are: IPV4_ONLY, IPV4_IPV6. If not specified, IPV4_ONLY will be used.", "enum": [ "IPV4_IPV6", "IPV4_ONLY" @@ -64995,11 +68710,11 @@ "type": "integer" }, "interconnectAttachment": { - "description": "URL of the VLAN attachment (interconnectAttachment) resource for this VPN gateway interface. When the value of this field is present, the VPN gateway is used for IPsec-encrypted Cloud Interconnect; all egress or ingress traffic for this VPN gateway interface goes through the specified VLAN attachment resource. Not currently available publicly. ", + "description": "URL of the VLAN attachment (interconnectAttachment) resource for this VPN gateway interface. When the value of this field is present, the VPN gateway is used for HA VPN over Cloud Interconnect; all egress or ingress traffic for this VPN gateway interface goes through the specified VLAN attachment resource.", "type": "string" }, "ipAddress": { - "description": "[Output Only] IP address for this VPN interface associated with the VPN gateway. The IP address could be either a regional external IP address or a regional internal IP address. The two IP addresses for a VPN gateway must be all regional external or regional internal IP addresses. There cannot be a mix of regional external IP addresses and regional internal IP addresses. For IPsec-encrypted Cloud Interconnect, the IP addresses for both interfaces could either be regional internal IP addresses or regional external IP addresses. For regular (non IPsec-encrypted Cloud Interconnect) HA VPN tunnels, the IP address must be a regional external IP address.", + "description": "[Output Only] IP address for this VPN interface associated with the VPN gateway. The IP address could be either a regional external IP address or a regional internal IP address. The two IP addresses for a VPN gateway must be all regional external or regional internal IP addresses. There cannot be a mix of regional external IP addresses and regional internal IP addresses. For HA VPN over Cloud Interconnect, the IP addresses for both interfaces could either be regional internal IP addresses or regional external IP addresses. For regular (non HA VPN over Cloud Interconnect) HA VPN tunnels, the IP address must be a regional external IP address.", "type": "string" } }, @@ -65169,7 +68884,7 @@ "type": "string" }, "peerExternalGatewayInterface": { - "description": "The interface ID of the external VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created.", + "description": "The interface ID of the external VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created. Possible values are: `0`, `1`, `2`, `3`. 
The number of IDs in use depends on the external VPN gateway redundancy type.", "format": "int32", "type": "integer" }, @@ -65251,7 +68966,7 @@ "type": "string" }, "vpnGatewayInterface": { - "description": "The interface ID of the VPN gateway with which this VPN tunnel is associated.", + "description": "The interface ID of the VPN gateway with which this VPN tunnel is associated. Possible values are: `0`, `1`.", "format": "int32", "type": "integer" } @@ -65637,6 +69352,11 @@ "id": { "description": "Expression ID should uniquely identify the origin of the expression. E.g. owasp-crs-v020901-id973337 identifies Owasp core rule set version 2.9.1 rule id 973337. The ID could be used to determine the individual attack definition that has been detected. It could also be used to exclude it from the policy in case of false positive. required", "type": "string" + }, + "sensitivity": { + "description": "The sensitivity value associated with the WAF rule ID. This corresponds to the ModSecurity paranoia level, ranging from 1 to 4. 0 is reserved for opt-in only rules.", + "format": "int32", + "type": "integer" } }, "type": "object" diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 042a333ec..c30ae0d4e 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC. +// Copyright 2023 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,35 +8,35 @@ // // For product documentation, see: https://cloud.google.com/compute/ // -// Creating a client +// # Creating a client // // Usage example: // -// import "google.golang.org/api/compute/v1" -// ... -// ctx := context.Background() -// computeService, err := compute.NewService(ctx) +// import "google.golang.org/api/compute/v1" +// ... +// ctx := context.Background() +// computeService, err := compute.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // -// Other authentication options +// # Other authentication options // // By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: // -// computeService, err := compute.NewService(ctx, option.WithScopes(compute.DevstorageReadWriteScope)) +// computeService, err := compute.NewService(ctx, option.WithScopes(compute.DevstorageReadWriteScope)) // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // -// computeService, err := compute.NewService(ctx, option.WithAPIKey("AIza...")) +// computeService, err := compute.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// computeService, err := compute.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) 
+// computeService, err := compute.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package compute // import "google.golang.org/api/compute/v1" @@ -177,6 +177,7 @@ func New(client *http.Client) (*Service, error) { s.Licenses = NewLicensesService(s) s.MachineImages = NewMachineImagesService(s) s.MachineTypes = NewMachineTypesService(s) + s.NetworkAttachments = NewNetworkAttachmentsService(s) s.NetworkEdgeSecurityServices = NewNetworkEdgeSecurityServicesService(s) s.NetworkEndpointGroups = NewNetworkEndpointGroupsService(s) s.NetworkFirewallPolicies = NewNetworkFirewallPoliciesService(s) @@ -197,6 +198,7 @@ func New(client *http.Client) (*Service, error) { s.RegionHealthChecks = NewRegionHealthChecksService(s) s.RegionInstanceGroupManagers = NewRegionInstanceGroupManagersService(s) s.RegionInstanceGroups = NewRegionInstanceGroupsService(s) + s.RegionInstanceTemplates = NewRegionInstanceTemplatesService(s) s.RegionInstances = NewRegionInstancesService(s) s.RegionNetworkEndpointGroups = NewRegionNetworkEndpointGroupsService(s) s.RegionNetworkFirewallPolicies = NewRegionNetworkFirewallPoliciesService(s) @@ -204,8 +206,10 @@ func New(client *http.Client) (*Service, error) { s.RegionOperations = NewRegionOperationsService(s) s.RegionSecurityPolicies = NewRegionSecurityPoliciesService(s) s.RegionSslCertificates = NewRegionSslCertificatesService(s) + s.RegionSslPolicies = NewRegionSslPoliciesService(s) s.RegionTargetHttpProxies = NewRegionTargetHttpProxiesService(s) s.RegionTargetHttpsProxies = NewRegionTargetHttpsProxiesService(s) + s.RegionTargetTcpProxies = NewRegionTargetTcpProxiesService(s) s.RegionUrlMaps = NewRegionUrlMapsService(s) s.Regions = NewRegionsService(s) s.Reservations = NewReservationsService(s) @@ -305,6 +309,8 @@ type Service struct { MachineTypes *MachineTypesService + NetworkAttachments *NetworkAttachmentsService + NetworkEdgeSecurityServices *NetworkEdgeSecurityServicesService NetworkEndpointGroups *NetworkEndpointGroupsService @@ -345,6 +351,8 @@ type Service struct { RegionInstanceGroups *RegionInstanceGroupsService + RegionInstanceTemplates *RegionInstanceTemplatesService + RegionInstances *RegionInstancesService RegionNetworkEndpointGroups *RegionNetworkEndpointGroupsService @@ -359,10 +367,14 @@ type Service struct { RegionSslCertificates *RegionSslCertificatesService + RegionSslPolicies *RegionSslPoliciesService + RegionTargetHttpProxies *RegionTargetHttpProxiesService RegionTargetHttpsProxies *RegionTargetHttpsProxiesService + RegionTargetTcpProxies *RegionTargetTcpProxiesService + RegionUrlMaps *RegionUrlMapsService Regions *RegionsService @@ -718,6 +730,15 @@ type MachineTypesService struct { s *Service } +func NewNetworkAttachmentsService(s *Service) *NetworkAttachmentsService { + rs := &NetworkAttachmentsService{s: s} + return rs +} + +type NetworkAttachmentsService struct { + s *Service +} + func NewNetworkEdgeSecurityServicesService(s *Service) *NetworkEdgeSecurityServicesService { rs := &NetworkEdgeSecurityServicesService{s: s} return rs @@ -898,6 +919,15 @@ type RegionInstanceGroupsService struct { s *Service } +func NewRegionInstanceTemplatesService(s *Service) *RegionInstanceTemplatesService { + rs := &RegionInstanceTemplatesService{s: s} + return rs +} + +type RegionInstanceTemplatesService struct { + s *Service +} + func NewRegionInstancesService(s *Service) *RegionInstancesService { rs := &RegionInstancesService{s: s} return rs @@ -961,6 +991,15 @@ 
type RegionSslCertificatesService struct { s *Service } +func NewRegionSslPoliciesService(s *Service) *RegionSslPoliciesService { + rs := &RegionSslPoliciesService{s: s} + return rs +} + +type RegionSslPoliciesService struct { + s *Service +} + func NewRegionTargetHttpProxiesService(s *Service) *RegionTargetHttpProxiesService { rs := &RegionTargetHttpProxiesService{s: s} return rs @@ -979,6 +1018,15 @@ type RegionTargetHttpsProxiesService struct { s *Service } +func NewRegionTargetTcpProxiesService(s *Service) *RegionTargetTcpProxiesService { + rs := &RegionTargetTcpProxiesService{s: s} + return rs +} + +type RegionTargetTcpProxiesService struct { + s *Service +} + func NewRegionUrlMapsService(s *Service) *RegionUrlMapsService { rs := &RegionUrlMapsService{s: s} return rs @@ -1871,9 +1919,10 @@ func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { type AccessConfig struct { // ExternalIpv6: The first IPv6 address of the external IPv6 range // associated with this instance, prefix length is stored in - // externalIpv6PrefixLength in ipv6AccessConfig. The field is output - // only, an IPv6 address from a subnetwork associated with the instance - // will be allocated dynamically. + // externalIpv6PrefixLength in ipv6AccessConfig. To use a static + // external IP address, it must be unused and in the same region as the + // instance's zone. If not specified, Google Cloud will automatically + // assign an external IPv6 address from the instance's subnetwork. ExternalIpv6 string `json:"externalIpv6,omitempty"` // ExternalIpv6PrefixLength: The prefix length of the external IPv6 @@ -2002,6 +2051,16 @@ type Address struct { // "UNSPECIFIED_VERSION" IpVersion string `json:"ipVersion,omitempty"` + // Ipv6EndpointType: The endpoint type of this address, which should be + // VM or NETLB. This is used for deciding which type of endpoint this + // address can be used after the external IPv6 address reservation. + // + // Possible values: + // "NETLB" - Reserved IPv6 address can be used on network load + // balancer. + // "VM" - Reserved IPv6 address can be used on VM. + Ipv6EndpointType string `json:"ipv6EndpointType,omitempty"` + // Kind: [Output Only] Type of the resource. Always compute#address for // addresses. Kind string `json:"kind,omitempty"` @@ -2052,20 +2111,20 @@ type Address struct { // NAT_AUTO for the regional external IP addresses used by Cloud NAT // when allocating addresses using automatic NAT IP address allocation. // - IPSEC_INTERCONNECT for addresses created from a private IP range - // that are reserved for a VLAN attachment in an *IPsec-encrypted Cloud + // that are reserved for a VLAN attachment in an *HA VPN over Cloud // Interconnect* configuration. These addresses are regional resources. - // Not currently available publicly. - `SHARED_LOADBALANCER_VIP` for an - // internal IP address that is assigned to multiple internal forwarding - // rules. - `PRIVATE_SERVICE_CONNECT` for a private network address that - // is used to configure Private Service Connect. Only global internal - // addresses can use this purpose. + // - `SHARED_LOADBALANCER_VIP` for an internal IP address that is + // assigned to multiple internal forwarding rules. - + // `PRIVATE_SERVICE_CONNECT` for a private network address that is used + // to configure Private Service Connect. Only global internal addresses + // can use this purpose. // // Possible values: // "DNS_RESOLVER" - DNS resolver address in the subnetwork. 
// "GCE_ENDPOINT" - VM internal/alias IP, Internal LB service IP, etc. // "IPSEC_INTERCONNECT" - A regional internal IP address range - // reserved for the VLAN attachment that is used in IPsec-encrypted - // Cloud Interconnect. This regional internal IP address range must not + // reserved for the VLAN attachment that is used in HA VPN over Cloud + // Interconnect. This regional internal IP address range must not // overlap with any IP address range of subnet/route in the VPC network // and its peering networks. After the VLAN attachment is created with // the reserved IP address range, when creating a new VPN gateway, its @@ -2710,6 +2769,13 @@ type AdvancedMachineFeatures struct { // processor is assumed. ThreadsPerCore int64 `json:"threadsPerCore,omitempty"` + // VisibleCoreCount: The number of physical cores to expose to an + // instance. Multiply by the number of threads per core to compute the + // total number of virtual CPUs to expose to the instance. If unset, the + // number of cores is inferred from the instance's nominal CPU count and + // the underlying platform's SMT width. + VisibleCoreCount int64 `json:"visibleCoreCount,omitempty"` + // ForceSendFields is a list of field names (e.g. // "EnableNestedVirtualization") to unconditionally include in API // requests. By default, fields with empty or default values are omitted @@ -2774,6 +2840,68 @@ func (s *AliasIpRange) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AllocationResourceStatus: [Output Only] Contains output only fields. +type AllocationResourceStatus struct { + // SpecificSkuAllocation: Allocation Properties of this reservation. + SpecificSkuAllocation *AllocationResourceStatusSpecificSKUAllocation `json:"specificSkuAllocation,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "SpecificSkuAllocation") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SpecificSkuAllocation") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AllocationResourceStatus) MarshalJSON() ([]byte, error) { + type NoMethod AllocationResourceStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AllocationResourceStatusSpecificSKUAllocation: Contains Properties +// set for the reservation. +type AllocationResourceStatusSpecificSKUAllocation struct { + // SourceInstanceTemplateId: ID of the instance template used to + // populate reservation properties. + SourceInstanceTemplateId string `json:"sourceInstanceTemplateId,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "SourceInstanceTemplateId") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. 
However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SourceInstanceTemplateId") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AllocationResourceStatusSpecificSKUAllocation) MarshalJSON() ([]byte, error) { + type NoMethod AllocationResourceStatusSpecificSKUAllocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk struct { // DiskSizeGb: Specifies the size of the disk in base-2 GB. DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` @@ -2875,6 +3003,17 @@ type AllocationSpecificSKUReservation struct { // InstanceProperties: The instance properties for the reservation. InstanceProperties *AllocationSpecificSKUAllocationReservedInstanceProperties `json:"instanceProperties,omitempty"` + // SourceInstanceTemplate: Specifies the instance template to create the + // reservation. If you use this field, you must exclude the + // instanceProperties field. This field is optional, and it can be a + // full or partial URL. For example, the following are all valid URLs to + // an instance template: - + // https://www.googleapis.com/compute/v1/projects/project + // /global/instanceTemplates/instanceTemplate - + // projects/project/global/instanceTemplates/instanceTemplate - + // global/instanceTemplates/instanceTemplate + SourceInstanceTemplate string `json:"sourceInstanceTemplate,omitempty"` + // ForceSendFields is a list of field names (e.g. "AssuredCount") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -2949,6 +3088,11 @@ type AttachedDisk struct { // DiskSizeGb: The size of the disk in GB. DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + // ForceAttach: [Input Only] Whether to force attach the regional disk + // even if it's currently attached to another instance. If you try to + // force attach a zonal disk to an instance, you will receive an error. + ForceAttach bool `json:"forceAttach,omitempty"` + // GuestOsFeatures: A list of features to enable on the guest operating // system. Applicable only for bootable images. Read Enabling guest // operating system features to see a list of available options. @@ -2967,11 +3111,10 @@ type AttachedDisk struct { InitializeParams *AttachedDiskInitializeParams `json:"initializeParams,omitempty"` // Interface: Specifies the disk interface to use for attaching this - // disk, which is either SCSI or NVME. The default is SCSI. Persistent - // disks must always use SCSI and the request will fail if you attempt - // to attach a persistent disk in any other format than SCSI. Local SSDs - // can use either NVME or SCSI. For performance characteristics of SCSI - // over NVMe, see Local SSD performance. + // disk, which is either SCSI or NVME. For most machine types, the + // default is SCSI. 
Local SSDs can use either NVME or SCSI. In certain + // configurations, persistent disks can use NVMe. For more information, + // see About persistent disks. // // Possible values: // "NVME" @@ -3082,13 +3225,15 @@ type AttachedDiskInitializeParams struct { // URL. For example: // https://www.googleapis.com/compute/v1/projects/project/zones/zone // /diskTypes/pd-standard For a full list of acceptable values, see - // Persistent disk types. If you define this field, you can provide - // either the full or partial URL. For example, the following are valid - // values: - + // Persistent disk types. If you specify this field when creating a VM, + // you can provide either the full or partial URL. For example, the + // following values are valid: - // https://www.googleapis.com/compute/v1/projects/project/zones/zone // /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - // - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this - // is the name of the disk type, not URL. + // - zones/zone/diskTypes/diskType If you specify this field when + // creating or updating an instance template or all-instances + // configuration, specify the type of the disk, not the URL. For + // example: pd-standard. DiskType string `json:"diskType,omitempty"` // Labels: Labels to apply to this disk. These can be later modified by @@ -3118,6 +3263,13 @@ type AttachedDiskInitializeParams struct { // see the Extreme persistent disk documentation. ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + // ResourceManagerTags: Resource manager tags to be bound to the disk. + // Tag keys and values have the same definition as resource manager + // tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values + // are in the format `tagValues/456`. The field is ignored (both PUT & + // PATCH) when empty. + ResourceManagerTags map[string]string `json:"resourceManagerTags,omitempty"` + // ResourcePolicies: Resource policies applied to this disk for // automatic snapshot creations. Specified using the full or partial // URL. For instance template, specify only the resource policy name. @@ -3142,10 +3294,10 @@ type AttachedDiskInitializeParams struct { // SourceImageEncryptionKey: The customer-supplied encryption key of the // source image. Required if the source image is protected by a - // customer-supplied encryption key. Instance templates do not store - // customer-supplied encryption keys, so you cannot create disks for - // instances in a managed instance group if the source images are - // encrypted with your own keys. + // customer-supplied encryption key. InstanceTemplate and + // InstancePropertiesPatch do not store customer-supplied encryption + // keys, so you cannot create disks for instances in a managed instance + // group if the source images are encrypted with your own keys. SourceImageEncryptionKey *CustomerEncryptionKey `json:"sourceImageEncryptionKey,omitempty"` // SourceSnapshot: The source snapshot to create this disk. When @@ -4551,7 +4703,9 @@ type Backend struct { // offering 0% of its available capacity. The valid ranges are 0.0 and // [0.1,1.0]. You cannot configure a setting larger than 0 and smaller // than 0.1. You cannot configure a setting of 0 when there is only one - // backend attached to the backend service. + // backend attached to the backend service. Not available with backends + // that don't support using a balancingMode. This includes backends such + // as global internet NEGs, regional serverless NEGs, and PSC NEGs. 
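The capacityScaler and balancingMode behavior described above can be exercised through these generated types. The following is a minimal sketch, assuming a hypothetical project, health check, and instance-group URL, of a backend service whose single backend advertises half of its capacity; it is illustrative only and not part of the generated client.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// createScaledBackendService is an illustrative sketch (not generated code):
// it inserts a backend service whose one backend advertises 50% of its
// capacity via CapacityScaler. Project, health check, and instance-group
// names are hypothetical placeholders.
func createScaledBackendService(ctx context.Context, project string) error {
	svc, err := compute.NewService(ctx) // Application Default Credentials
	if err != nil {
		return err
	}
	bs := &compute.BackendService{
		Name:                "example-bes",
		Protocol:            "HTTP",
		LoadBalancingScheme: "EXTERNAL_MANAGED",
		HealthChecks:        []string{"global/healthChecks/example-hc"},
		Backends: []*compute.Backend{{
			Group:          "https://www.googleapis.com/compute/v1/projects/example-project/zones/us-central1-a/instanceGroups/example-ig",
			BalancingMode:  "UTILIZATION",
			CapacityScaler: 0.5, // valid values are 0.0 or within [0.1, 1.0]
		}},
	}
	_, err = svc.BackendServices.Insert(project, bs).Context(ctx).Do()
	return err
}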
CapacityScaler float64 `json:"capacityScaler,omitempty"` // Description: An optional description of this resource. Provide this @@ -4666,6 +4820,16 @@ type BackendBucket struct { // CdnPolicy: Cloud CDN configuration for this BackendBucket. CdnPolicy *BackendBucketCdnPolicy `json:"cdnPolicy,omitempty"` + // CompressionMode: Compress text responses using Brotli or gzip + // compression, based on the client's Accept-Encoding header. + // + // Possible values: + // "AUTOMATIC" - Automatically uses the best compression based on the + // Accept-Encoding header sent by the client. + // "DISABLED" - Disables compression. Existing compressed responses + // cached by Cloud CDN will not be served to clients. + CompressionMode string `json:"compressionMode,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -5222,6 +5386,16 @@ type BackendService struct { CircuitBreakers *CircuitBreakers `json:"circuitBreakers,omitempty"` + // CompressionMode: Compress text responses using Brotli or gzip + // compression, based on the client's Accept-Encoding header. + // + // Possible values: + // "AUTOMATIC" - Automatically uses the best compression based on the + // Accept-Encoding header sent by the client. + // "DISABLED" - Disables compression. Existing compressed responses + // cached by Cloud CDN will not be served to clients. + CompressionMode string `json:"compressionMode,omitempty"` + ConnectionDraining *ConnectionDraining `json:"connectionDraining,omitempty"` // ConnectionTrackingPolicy: Connection Tracking configuration for this @@ -5328,12 +5502,16 @@ type BackendService struct { // "INVALID_LOAD_BALANCING_SCHEME" LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` - // LocalityLbPolicies: A list of locality load balancing policies to be - // used in order of preference. Either the policy or the customPolicy - // field should be set. Overrides any value set in the localityLbPolicy - // field. localityLbPolicies is only supported when the BackendService - // is referenced by a URL Map that is referenced by a target gRPC proxy - // that has the validateForProxyless field set to true. + // LocalityLbPolicies: A list of locality load-balancing policies to be + // used in order of preference. When you use localityLbPolicies, you + // must set at least one value for either the + // localityLbPolicies[].policy or the localityLbPolicies[].customPolicy + // field. localityLbPolicies overrides any value set in the + // localityLbPolicy field. For an example of how to use this field, see + // Define a list of preferred policies. Caution: This field and its + // children are intended for use in a service mesh that includes gRPC + // clients only. Envoy proxies can't use backend services that have this + // configuration. LocalityLbPolicies []*BackendServiceLocalityLoadBalancingPolicyConfig `json:"localityLbPolicies,omitempty"` // LocalityLbPolicy: The load balancing algorithm used within the scope @@ -5385,6 +5563,16 @@ type BackendService struct { // of the requests. // "ROUND_ROBIN" - This is a simple policy in which each healthy // backend is selected in round robin order. This is the default. + // "WEIGHTED_MAGLEV" - Per-instance weighted Load Balancing via health + // check reported weights. 
If set, the Backend Service must configure a + // non legacy HTTP-based Health Check, and health check replies are + // expected to contain non-standard HTTP response header field + // X-Load-Balancing-Endpoint-Weight to specify the per-instance weights. + // If set, Load Balancing is weighted based on the per-instance weights + // reported in the last processed health check replies, as long as every + // instance either reported a valid weight or had UNAVAILABLE_WEIGHT. + // Otherwise, Load Balancing remains equal-weight. This option is only + // supported in Network Load Balancing. LocalityLbPolicy string `json:"localityLbPolicy,omitempty"` // LogConfig: This field denotes the logging options for the load @@ -5421,11 +5609,9 @@ type BackendService struct { // hosts from the load balancing pool for the backend service. If not // set, this feature is considered disabled. This field is applicable to // either: - A regional backend service with the service_protocol set to - // HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to + // HTTP, HTTPS, HTTP2, or GRPC, and load_balancing_scheme set to // INTERNAL_MANAGED. - A global backend service with the - // load_balancing_scheme set to INTERNAL_SELF_MANAGED. Not supported - // when the backend service is referenced by a URL map that is bound to - // target gRPC proxy that has validateForProxyless field set to true. + // load_balancing_scheme set to INTERNAL_SELF_MANAGED. OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` // Port: Deprecated in favor of portName. The TCP port to connect on the @@ -6465,12 +6651,13 @@ type BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy struct { // understood by a locally installed custom policy implementation. Data string `json:"data,omitempty"` - // Name: Identifies the custom policy. The value should match the type - // the custom implementation is registered with on the gRPC clients. It - // should follow protocol buffer message naming conventions and include - // the full path (e.g. myorg.CustomLbPolicy). The maximum length is 256 - // characters. Note that specifying the same custom policy more than - // once for a backend is not a valid configuration and will be rejected. + // Name: Identifies the custom policy. The value should match the name + // of a custom implementation registered on the gRPC clients. It should + // follow protocol buffer message naming conventions and include the + // full path (for example, myorg.CustomLbPolicy). The maximum length is + // 256 characters. Do not specify the same custom policy more than once + // for a backend. If you do, the configuration is rejected. For an + // example of how to use this field, see Use a custom policy. Name string `json:"name,omitempty"` // ForceSendFields is a list of field names (e.g. "Data") to @@ -6499,12 +6686,11 @@ func (s *BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy) MarshalJSO // BackendServiceLocalityLoadBalancingPolicyConfigPolicy: The // configuration for a built-in load balancing policy. type BackendServiceLocalityLoadBalancingPolicyConfigPolicy struct { - // Name: The name of a locality load balancer policy to be used. The - // value should be one of the predefined ones as supported by - // localityLbPolicy, although at the moment only ROUND_ROBIN is - // supported. This field should only be populated when the customPolicy - // field is not used. Note that specifying the same policy more than - // once for a backend is not a valid configuration and will be rejected. 
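For the localityLbPolicies list described above, a minimal sketch (illustrative only, with example values, not part of the generated client) of selecting a single built-in policy might look like this:

package example

import compute "google.golang.org/api/compute/v1"

// withRoundRobinLocalityPolicy is an illustrative sketch (not generated
// code): it sets one built-in locality load-balancing policy. The list
// overrides any value in the plain localityLbPolicy field, and the same
// policy must not appear more than once.
func withRoundRobinLocalityPolicy(bs *compute.BackendService) {
	bs.LocalityLbPolicies = []*compute.BackendServiceLocalityLoadBalancingPolicyConfig{{
		Policy: &compute.BackendServiceLocalityLoadBalancingPolicyConfigPolicy{
			Name: "ROUND_ROBIN",
		},
	}}
}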
+ // Name: The name of a locality load-balancing policy. Valid values + // include ROUND_ROBIN and, for Java clients, LEAST_REQUEST. For + // information about these values, see the description of + // localityLbPolicy. Do not specify the same policy more than once for a + // backend. If you do, the configuration is rejected. // // Possible values: // "INVALID_LB_POLICY" @@ -6527,6 +6713,16 @@ type BackendServiceLocalityLoadBalancingPolicyConfigPolicy struct { // of the requests. // "ROUND_ROBIN" - This is a simple policy in which each healthy // backend is selected in round robin order. This is the default. + // "WEIGHTED_MAGLEV" - Per-instance weighted Load Balancing via health + // check reported weights. If set, the Backend Service must configure a + // non legacy HTTP-based Health Check, and health check replies are + // expected to contain non-standard HTTP response header field + // X-Load-Balancing-Endpoint-Weight to specify the per-instance weights. + // If set, Load Balancing is weighted based on the per-instance weights + // reported in the last processed health check replies, as long as every + // instance either reported a valid weight or had UNAVAILABLE_WEIGHT. + // Otherwise, Load Balancing remains equal-weight. This option is only + // supported in Network Load Balancing. Name string `json:"name,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to @@ -6555,15 +6751,34 @@ func (s *BackendServiceLocalityLoadBalancingPolicyConfigPolicy) MarshalJSON() ([ // BackendServiceLogConfig: The available logging options for the load // balancer traffic served by this backend service. type BackendServiceLogConfig struct { - // Enable: This field denotes whether to enable logging for the load - // balancer traffic served by this backend service. + // Enable: Denotes whether to enable logging for the load balancer + // traffic served by this backend service. The default value is false. Enable bool `json:"enable,omitempty"` + // OptionalFields: This field can only be specified if logging is + // enabled for this backend service and "logConfig.optionalMode" was set + // to CUSTOM. Contains a list of optional fields you want to include in + // the logs. For example: serverInstance, serverGkeDetails.cluster, + // serverGkeDetails.pod.podNamespace + OptionalFields []string `json:"optionalFields,omitempty"` + + // OptionalMode: This field can only be specified if logging is enabled + // for this backend service. Configures whether all, none or a subset of + // optional fields should be added to the reported logs. One of + // [INCLUDE_ALL_OPTIONAL, EXCLUDE_ALL_OPTIONAL, CUSTOM]. Default is + // EXCLUDE_ALL_OPTIONAL. + // + // Possible values: + // "CUSTOM" - A subset of optional fields. + // "EXCLUDE_ALL_OPTIONAL" - None optional fields. + // "INCLUDE_ALL_OPTIONAL" - All optional fields. + OptionalMode string `json:"optionalMode,omitempty"` + // SampleRate: This field can only be specified if logging is enabled // for this backend service. The value of the field must be in [0, 1]. // This configures the sampling rate of requests to the load balancer // where 1.0 means all logged requests are reported and 0.0 means no - // logged requests are reported. The default value is 0.0. + // logged requests are reported. The default value is 1.0. SampleRate float64 `json:"sampleRate,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Enable") to @@ -7064,19 +7279,26 @@ type Binding struct { // `allUsers`: A special identifier that represents anyone who is on the // internet; with or without a Google account. * // `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. * - // `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . * - // `serviceAccount:{emailid}`: An email address that represents a - // service account. For example, - // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An - // email address that represents a Google group. For example, - // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An - // email address (plus unique identifier) representing a user that has - // been recently deleted. For example, - // `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered - // user retains the role in the binding. * + // who is authenticated with a Google account or a service account. Does + // not include identities that come from external identity providers + // (IdPs) through identity federation. * `user:{emailid}`: An email + // address that represents a specific Google account. For example, + // `alice@example.com` . * `serviceAccount:{emailid}`: An email address + // that represents a Google service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * + // `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: + // An identifier for a Kubernetes service account + // (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). + // For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. + // * `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. * `domain:{domain}`: The G Suite + // domain (primary) that represents all the users of that domain. For + // example, `google.com` or `example.com`. * + // `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a user that has been recently + // deleted. For example, `alice@example.com?uid=123456789012345678901`. + // If the user is recovered, this value reverts to `user:{emailid}` and + // the recovered user retains the role in the binding. * // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address // (plus unique identifier) representing a service account that has been // recently deleted. For example, @@ -7088,9 +7310,7 @@ type Binding struct { // that has been recently deleted. For example, // `admins@example.com?uid=123456789012345678901`. If the group is // recovered, this value reverts to `group:{emailid}` and the recovered - // group retains the role in the binding. * `domain:{domain}`: The G - // Suite domain (primary) that represents all the users of that domain. - // For example, `google.com` or `example.com`. + // group retains the role in the binding. Members []string `json:"members,omitempty"` // Role: Role that is assigned to the list of `members`, or principals. @@ -7428,6 +7648,10 @@ type Commitment struct { // license commitment. LicenseResource *LicenseResourceCommitment `json:"licenseResource,omitempty"` + // MergeSourceCommitments: List of source commitments to be merged into + // a new commitment. + MergeSourceCommitments []string `json:"mergeSourceCommitments,omitempty"` + // Name: Name of the resource. 
Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -7461,6 +7685,10 @@ type Commitment struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SplitSourceCommitment: Source commitment to be splitted into a new + // commitment. + SplitSourceCommitment string `json:"splitSourceCommitment,omitempty"` + // StartTimestamp: [Output Only] Commitment start time in RFC3339 text // format. StartTimestamp string `json:"startTimestamp,omitempty"` @@ -7471,7 +7699,8 @@ type Commitment struct { // // Possible values: // "ACTIVE" - // "CANCELLED" + // "CANCELLED" - Deprecate CANCELED status. Will use separate status + // to differentiate cancel by mergeCud or manual cancellation. // "CREATING" // "EXPIRED" // "NOT_YET_ACTIVE" @@ -7491,6 +7720,7 @@ type Commitment struct { // "ACCELERATOR_OPTIMIZED" // "COMPUTE_OPTIMIZED" // "COMPUTE_OPTIMIZED_C2D" + // "COMPUTE_OPTIMIZED_C3" // "GENERAL_PURPOSE" // "GENERAL_PURPOSE_E2" // "GENERAL_PURPOSE_N2" @@ -8313,7 +8543,9 @@ type CorsPolicy struct { // AllowOriginRegexes: Specifies a regular expression that matches // allowed origins. For more information about the regular expression // syntax, see Syntax. An origin is allowed if it matches either an item - // in allowOrigins or an item in allowOriginRegexes. + // in allowOrigins or an item in allowOriginRegexes. Regular expressions + // can only be used when the loadBalancingScheme is set to + // INTERNAL_SELF_MANAGED. AllowOriginRegexes []string `json:"allowOriginRegexes,omitempty"` // AllowOrigins: Specifies the list of origins that is allowed to do @@ -8628,6 +8860,10 @@ type Disk struct { // Options: Internal use only. Options string `json:"options,omitempty"` + // Params: Input only. [Input Only] Additional params passed with the + // request, but not persisted as part of resource payload. + Params *DiskParams `json:"params,omitempty"` + // PhysicalBlockSizeBytes: Physical block size of the persistent disk, // in bytes. If not present in a request, a default value is used. The // currently supported size is 4096, other sizes may be added in the @@ -9302,6 +9538,39 @@ func (s *DiskMoveRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskParams: Additional disk params. +type DiskParams struct { + // ResourceManagerTags: Resource manager tags to be bound to the disk. + // Tag keys and values have the same definition as resource manager + // tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values + // are in the format `tagValues/456`. The field is ignored (both PUT & + // PATCH) when empty. + ResourceManagerTags map[string]string `json:"resourceManagerTags,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ResourceManagerTags") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ResourceManagerTags") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DiskParams) MarshalJSON() ([]byte, error) { + type NoMethod DiskParams + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DiskType: Represents a Disk Type resource. Google Compute Engine has // two Disk Type resources: * Regional // (/compute/docs/reference/rest/v1/regionDiskTypes) * Zonal @@ -10362,7 +10631,8 @@ type ErrorInfo struct { // Reason: The reason of the error. This is a constant value that // identifies the proximate cause of the error. Error reasons are unique // within a particular domain of errors. This should be at most 63 - // characters and match /[A-Z0-9_]+/. + // characters and match a regular expression of `A-Z+[A-Z0-9]`, which + // represents UPPER_SNAKE_CASE. Reason string `json:"reason,omitempty"` // ForceSendFields is a list of field names (e.g. "Domain") to @@ -11101,10 +11371,8 @@ type Firewall struct { DestinationRanges []string `json:"destinationRanges,omitempty"` // Direction: Direction of traffic to which this firewall applies, - // either `INGRESS` or `EGRESS`. The default is `INGRESS`. For `INGRESS` - // traffic, you cannot specify the destinationRanges field, and for - // `EGRESS` traffic, you cannot specify the sourceRanges or sourceTags - // fields. + // either `INGRESS` or `EGRESS`. The default is `INGRESS`. For `EGRESS` + // traffic, you cannot specify the sourceTags fields. // // Possible values: // "EGRESS" - Indicates that firewall should apply to outgoing @@ -11599,9 +11867,10 @@ type FirewallPolicy struct { // DisplayName: Deprecated, please use short name instead. User-provided // name of the Organization firewall policy. The name should be unique // in the organization in which the firewall policy is created. This - // name must be set on creation and cannot be changed. The name must be - // 1-63 characters long, and comply with RFC1035. Specifically, the name - // must be 1-63 characters long and match the regular expression + // field is not applicable to network firewall policies. This name must + // be set on creation and cannot be changed. The name must be 1-63 + // characters long, and comply with RFC1035. Specifically, the name must + // be 1-63 characters long and match the regular expression // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be // a lowercase letter, and all following characters must be a dash, // lowercase letter, or digit, except the last character, which cannot @@ -11626,11 +11895,13 @@ type FirewallPolicy struct { // compute#firewallPolicyfor firewall policies Kind string `json:"kind,omitempty"` - // Name: [Output Only] Name of the resource. It is a numeric ID - // allocated by GCP which uniquely identifies the Firewall Policy. + // Name: Name of the resource. For Organization Firewall Policies it's a + // [Output Only] numeric ID allocated by Google Cloud which uniquely + // identifies the Organization Firewall Policy. Name string `json:"name,omitempty"` - // Parent: [Output Only] The parent of the firewall policy. + // Parent: [Output Only] The parent of the firewall policy. This field + // is not applicable to network firewall policies. 
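The firewall Direction semantics above (for EGRESS traffic you specify destination ranges, not source tags) can be sketched with the generated Firewall type; the names and CIDR range below are hypothetical and not part of the generated client.

package example

import compute "google.golang.org/api/compute/v1"

// exampleEgressFirewall is an illustrative sketch (not generated code): an
// EGRESS rule that denies TCP 443 to a destination range. All names and
// ranges are placeholders.
func exampleEgressFirewall() *compute.Firewall {
	return &compute.Firewall{
		Name:              "example-deny-egress",
		Network:           "global/networks/default",
		Direction:         "EGRESS",
		DestinationRanges: []string{"203.0.113.0/24"},
		Denied: []*compute.FirewallDenied{{
			IPProtocol: "tcp",
			Ports:      []string{"443"},
		}},
	}
}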
Parent string `json:"parent,omitempty"` // Region: [Output Only] URL of the region where the regional firewall @@ -11656,15 +11927,16 @@ type FirewallPolicy struct { // with the resource id. SelfLinkWithId string `json:"selfLinkWithId,omitempty"` - // ShortName: User-provided name of the Organization firewall plicy. The - // name should be unique in the organization in which the firewall - // policy is created. This name must be set on creation and cannot be - // changed. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // ShortName: User-provided name of the Organization firewall policy. + // The name should be unique in the organization in which the firewall + // policy is created. This field is not applicable to network firewall + // policies. This name must be set on creation and cannot be changed. + // The name must be 1-63 characters long, and comply with RFC1035. + // Specifically, the name must be 1-63 characters long and match the + // regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first + // character must be a lowercase letter, and all following characters + // must be a dash, lowercase letter, or digit, except the last + // character, which cannot be a dash. ShortName string `json:"shortName,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -11932,8 +12204,7 @@ func (s *FirewallPolicyListWarningData) MarshalJSON() ([]byte, error) { // matches this condition (allow or deny). type FirewallPolicyRule struct { // Action: The Action to perform when the client connection triggers the - // rule. Can currently be either "allow" or "deny()" where valid values - // for status are 403, 404, and 502. + // rule. Valid actions are "allow", "deny" and "goto_next". Action string `json:"action,omitempty"` // Description: An optional description for this resource. @@ -12213,10 +12484,11 @@ type ForwardingRule struct { // an existing static (reserved) IP address resource. When omitted, // Google Cloud assigns an ephemeral IP address. Use one of the // following formats to specify an IP address while creating a - // forwarding rule: * IP address number, as in `100.1.2.3` * Full - // resource URL, as in - // https://www.googleapis.com/compute/v1/projects/project_id/regions/region - // /addresses/address-name * Partial URL or by name, as in: - + // forwarding rule: * IP address number, as in `100.1.2.3` * IPv6 + // address range, as in `2600:1234::/96` * Full resource URL, as in + // https://www.googleapis.com/compute/v1/projects/ + // project_id/regions/region/addresses/address-name * Partial URL or by + // name, as in: - // projects/project_id/regions/region/addresses/address-name - // regions/region/addresses/address-name - global/addresses/address-name // - address-name The forwarding rule's target or backendService, and in @@ -12244,12 +12516,14 @@ type ForwardingRule struct { // "UDP" IPProtocol string `json:"IPProtocol,omitempty"` - // AllPorts: This field is used along with the backend_service field for - // Internal TCP/UDP Load Balancing or Network Load Balancing, or with - // the target field for internal and external TargetInstance. You can - // only use one of ports and port_range, or allPorts. 
The three are - // mutually exclusive. For TCP, UDP and SCTP traffic, packets addressed - // to any ports will be forwarded to the target or backendService. + // AllPorts: This field can only be used: - If IPProtocol is one of TCP, + // UDP, or SCTP. - By internal TCP/UDP load balancers, backend + // service-based network load balancers, and internal and external + // protocol forwarding. Set this field to true to allow packets + // addressed to any port or packets lacking destination port information + // (for example, UDP fragments after the first fragment) to be forwarded + // to the backends configured with this forwarding rule. The ports, + // port_range, and allPorts fields are mutually exclusive. AllPorts bool `json:"allPorts,omitempty"` // AllowGlobalAccess: This field is used along with the backend_service @@ -12265,6 +12539,14 @@ type ForwardingRule struct { // load balancer types. BackendService string `json:"backendService,omitempty"` + // BaseForwardingRule: [Output Only] The URL for the corresponding base + // Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule + // that has the same IP address, protocol, and port settings with the + // current Forwarding Rule, but without sourceIPRanges specified. Always + // empty if the current Forwarding Rule does not have sourceIPRanges + // specified. + BaseForwardingRule string `json:"baseForwardingRule,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -12398,28 +12680,37 @@ type ForwardingRule struct { // Non-PSC forwarding rules do not use this field. NoAutomateDnsZone bool `json:"noAutomateDnsZone,omitempty"` - // PortRange: This field can be used only if: - Load balancing scheme is - // one of EXTERNAL, INTERNAL_SELF_MANAGED or INTERNAL_MANAGED - - // IPProtocol is one of TCP, UDP, or SCTP. Packets addressed to ports in - // the specified range will be forwarded to target or backend_service. - // You can only use one of ports, port_range, or allPorts. The three are - // mutually exclusive. Forwarding rules with the same [IPAddress, - // IPProtocol] pair must have disjoint ports. Some types of forwarding - // target have constraints on the acceptable ports. For more - // information, see Port specifications - // (https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications). - // @pattern: \\d+(?:-\\d+)? + // PortRange: This field can only be used: - If IPProtocol is one of + // TCP, UDP, or SCTP. - By backend service-based network load balancers, + // target pool-based network load balancers, internal proxy load + // balancers, external proxy load balancers, Traffic Director, external + // protocol forwarding, and Classic VPN. Some products have restrictions + // on what ports can be used. See port specifications for details. Only + // packets addressed to ports in the specified range will be forwarded + // to the backends configured with this forwarding rule. The ports, + // port_range, and allPorts fields are mutually exclusive. For external + // forwarding rules, two or more forwarding rules cannot use the same + // [IPAddress, IPProtocol] pair, and cannot have overlapping portRanges. + // For internal forwarding rules within the same VPC network, two or + // more forwarding rules cannot use the same [IPAddress, IPProtocol] + // pair, and cannot have overlapping portRanges. @pattern: + // \\d+(?:-\\d+)? 
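The portRange/ports/allPorts exclusivity described above can be sketched as follows (illustrative only; rule name, target, and port are placeholders, not part of the generated client):

package example

import compute "google.golang.org/api/compute/v1"

// examplePortRangeRule is an illustrative sketch (not generated code): an
// external forwarding rule that sets only PortRange. Ports and AllPorts are
// left unset because the three fields are mutually exclusive.
func examplePortRangeRule() *compute.ForwardingRule {
	return &compute.ForwardingRule{
		Name:                "example-fr",
		IPProtocol:          "TCP",
		PortRange:           "80-80", // @pattern \d+(?:-\d+)?
		LoadBalancingScheme: "EXTERNAL",
		Target:              "global/targetHttpProxies/example-proxy",
	}
}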
PortRange string `json:"portRange,omitempty"` - // Ports: The ports field is only supported when the forwarding rule - // references a backend_service directly. Only packets addressed to the - // specified list of ports - // ((https://cloud.google.com/load-balancing/docs/forwarding-rule-concept - // s#port_specifications)) are forwarded to backends. You can only use - // one of ports and port_range, or allPorts. The three are mutually - // exclusive. You can specify a list of up to five ports, which can be - // non-contiguous. Forwarding rules with the same [IPAddress, - // IPProtocol] pair must have disjoint ports. @pattern: \\d+(?:-\\d+)? + // Ports: This field can only be used: - If IPProtocol is one of TCP, + // UDP, or SCTP. - By internal TCP/UDP load balancers, backend + // service-based network load balancers, and internal protocol + // forwarding. You can specify a list of up to five ports by number, + // separated by commas. The ports can be contiguous or discontiguous. + // Only packets addressed to these ports will be forwarded to the + // backends configured with this forwarding rule. For external + // forwarding rules, two or more forwarding rules cannot use the same + // [IPAddress, IPProtocol] pair, and cannot share any values defined in + // ports. For internal forwarding rules within the same VPC network, two + // or more forwarding rules cannot use the same [IPAddress, IPProtocol] + // pair, and cannot share any values defined in ports. The ports, + // port_range, and allPorts fields are mutually exclusive. @pattern: + // \\d+(?:-\\d+)? Ports []string `json:"ports,omitempty"` // PscConnectionId: [Output Only] The PSC connection id of the PSC @@ -12468,6 +12759,15 @@ type ForwardingRule struct { // balancing. ServiceName string `json:"serviceName,omitempty"` + // SourceIpRanges: If not empty, this Forwarding Rule will only forward + // the traffic when the source IP address matches one of the IP + // addresses or CIDR ranges set here. Note that a Forwarding Rule can + // only have up to 64 source IP ranges, and this field can only be used + // with a regional Forwarding Rule whose scheme is EXTERNAL. Each + // source_ip_range entry should be either an IP address (for example, + // 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24). + SourceIpRanges []string `json:"sourceIpRanges,omitempty"` + // Subnetwork: This field identifies the subnetwork that the load // balanced IP should belong to for this Forwarding Rule, used in // internal load balancing and network load balancing with IPv6. If the @@ -12476,6 +12776,19 @@ type ForwardingRule struct { // subnet mode or when creating external forwarding rule with IPv6. Subnetwork string `json:"subnetwork,omitempty"` + // Target: The URL of the target resource to receive the matched + // traffic. For regional forwarding rules, this target must be in the + // same region as the forwarding rule. For global forwarding rules, this + // target must be a global load balancing resource. The forwarded + // traffic must be of a type appropriate to the target object. - For + // load balancers, see the "Target" column in Port specifications + // (https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). + // - For Private Service Connect forwarding rules that forward traffic + // to Google APIs, provide the name of a supported Google API bundle: - + // vpc-sc - APIs that support VPC Service Controls. - all-apis - All + // supported Google APIs. 
- For Private Service Connect forwarding rules + // that forward traffic to managed services, the target must be a + // service attachment. Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -13133,35 +13446,45 @@ type GRPCHealthCheck struct { // The grpc_service_name can only be ASCII. GrpcServiceName string `json:"grpcServiceName,omitempty"` - // Port: The port number for the health check request. Must be specified - // if port_name and port_specification are not set or if - // port_specification is USE_FIXED_PORT. Valid values are 1 through - // 65535. + // Port: The TCP port number to which the health check prober sends + // packets. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. The - // port_name should conform to RFC1035. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, gRPC health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Not supported by + // target pools. The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. 
+ // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ForceSendFields is a list of field names (e.g. "GrpcServiceName") to @@ -13477,8 +13800,8 @@ type GuestOsFeature struct { // commas to separate values. Set to one or more of the following // values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - // UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - - // SEV_SNP_CAPABLE For more information, see Enabling guest operating - // system features. + // SEV_SNP_CAPABLE - TDX_CAPABLE For more information, see Enabling + // guest operating system features. // // Possible values: // "FEATURE_TYPE_UNSPECIFIED" @@ -13486,6 +13809,7 @@ type GuestOsFeature struct { // "MULTI_IP_SUBNET" // "SECURE_BOOT" // "SEV_CAPABLE" + // "SEV_SNP_CAPABLE" // "UEFI_COMPATIBLE" // "VIRTIO_SCSI_MULTIQUEUE" // "WINDOWS" @@ -13516,36 +13840,52 @@ func (s *GuestOsFeature) MarshalJSON() ([]byte, error) { type HTTP2HealthCheck struct { // Host: The value of the host header in the HTTP/2 health check - // request. If left empty (default value), the IP on behalf of which - // this health check is performed will be used. + // request. If left empty (default value), the host header is set to the + // destination IP address to which health check packets are sent. The + // destination IP address depends on the type of load balancer. For + // details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest Host string `json:"host,omitempty"` - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 443. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, HTTP2 health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Not supported by + // target pools. 
The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ProxyHeader: Specifies the type of proxy header to append before @@ -13561,9 +13901,12 @@ type HTTP2HealthCheck struct { // default value is /. RequestPath string `json:"requestPath,omitempty"` - // Response: The string to match anywhere in the first 1024 bytes of the - // response body. If left empty (the default value), the status code - // determines health. The response data can only be ASCII. + // Response: Creates a content-based HTTP/2 health check. In addition to + // the required HTTP 200 (OK) status code, you can configure the health + // check to pass only when the backend sends this specific ASCII + // response string within the first 1024 bytes of the HTTP response + // body. For details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http Response string `json:"response,omitempty"` // ForceSendFields is a list of field names (e.g. "Host") to @@ -13591,36 +13934,52 @@ func (s *HTTP2HealthCheck) MarshalJSON() ([]byte, error) { type HTTPHealthCheck struct { // Host: The value of the host header in the HTTP health check request. - // If left empty (default value), the IP on behalf of which this health - // check is performed will be used. + // If left empty (default value), the host header is set to the + // destination IP address to which health check packets are sent. The + // destination IP address depends on the type of load balancer. 
For + // details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest Host string `json:"host,omitempty"` - // Port: The TCP port number for the health check request. The default - // value is 80. Valid values are 1 through 65535. + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 80. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, HTTP health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Also supported in + // legacy HTTP health checks for target pools. The health check supports + // all backends supported by the backend service provided the backend + // can be health checked. For example, GCE_VM_IP network endpoint + // groups, GCE_VM_IP_PORT network endpoint groups, and instance group + // backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides + // an indirect method of specifying the health check port by referring + // to the backend service. Only supported by backend services for proxy + // load balancers. Not supported by target pools. Not supported by + // backend services for pass-through load balancers. Supports all + // backends that can be health checked; for example, GCE_VM_IP_PORT + // network endpoint groups and instance group backends. For + // GCE_VM_IP_PORT network endpoint group backends, the health check uses + // the port number specified for each endpoint in the network endpoint + // group. For instance group backends, the health check uses the port + // number determined by looking up the backend service's named port in + // the instance group's list of named ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. 
For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ProxyHeader: Specifies the type of proxy header to append before @@ -13636,9 +13995,12 @@ type HTTPHealthCheck struct { // default value is /. RequestPath string `json:"requestPath,omitempty"` - // Response: The string to match anywhere in the first 1024 bytes of the - // response body. If left empty (the default value), the status code - // determines health. The response data can only be ASCII. + // Response: Creates a content-based HTTP health check. In addition to + // the required HTTP 200 (OK) status code, you can configure the health + // check to pass only when the backend sends this specific ASCII + // response string within the first 1024 bytes of the HTTP response + // body. For details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http Response string `json:"response,omitempty"` // ForceSendFields is a list of field names (e.g. "Host") to @@ -13666,36 +14028,52 @@ func (s *HTTPHealthCheck) MarshalJSON() ([]byte, error) { type HTTPSHealthCheck struct { // Host: The value of the host header in the HTTPS health check request. - // If left empty (default value), the IP on behalf of which this health - // check is performed will be used. + // If left empty (default value), the host header is set to the + // destination IP address to which health check packets are sent. The + // destination IP address depends on the type of load balancer. For + // details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#hc-packet-dest Host string `json:"host,omitempty"` - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 443. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, HTTPS health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Not supported by + // target pools. The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. 
USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ProxyHeader: Specifies the type of proxy header to append before @@ -13711,9 +14089,12 @@ type HTTPSHealthCheck struct { // default value is /. RequestPath string `json:"requestPath,omitempty"` - // Response: The string to match anywhere in the first 1024 bytes of the - // response body. If left empty (the default value), the status code - // determines health. The response data can only be ASCII. + // Response: Creates a content-based HTTPS health check. In addition to + // the required HTTP 200 (OK) status code, you can configure the health + // check to pass only when the backend sends this specific ASCII + // response string within the first 1024 bytes of the HTTP response + // body. For details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-http Response string `json:"response,omitempty"` // ForceSendFields is a list of field names (e.g. "Host") to @@ -14133,14 +14514,20 @@ type HealthCheckService struct { Fingerprint string `json:"fingerprint,omitempty"` // HealthChecks: A list of URLs to the HealthCheck resources. Must have - // at least one HealthCheck, and not more than 10. HealthCheck resources - // must have portSpecification=USE_SERVING_PORT or + // at least one HealthCheck, and not more than 10 for regional + // HealthCheckService, and not more than 1 for global + // HealthCheckService. HealthCheck resources must have + // portSpecification=USE_SERVING_PORT or // portSpecification=USE_FIXED_PORT. For regional HealthCheckService, // the HealthCheck must be regional and in the same region. For global // HealthCheckService, HealthCheck must be global. Mix of regional and // global HealthChecks is not supported. 
Multiple regional HealthChecks // must belong to the same region. Regional HealthChecks must belong to - // the same region as zones of NEGs. + // the same region as zones of NetworkEndpointGroups. For global + // HealthCheckService using global INTERNET_IP_PORT + // NetworkEndpointGroups, the global HealthChecks must specify + // sourceRegions, and HealthChecks that specify sourceRegions can only + // be used with global INTERNET_IP_PORT NetworkEndpointGroups. HealthChecks []string `json:"healthChecks,omitempty"` // HealthStatusAggregationPolicy: Optional. Policy for how the results @@ -14150,6 +14537,7 @@ type HealthCheckService struct { // service. - AND. If any health check of an endpoint reports UNHEALTHY, // then UNHEALTHY is the HealthState of the endpoint. If all health // checks report HEALTHY, the HealthState of the endpoint is HEALTHY. . + // This is only allowed with regional HealthCheckService. // // Possible values: // "AND" - If any backend's health check reports UNHEALTHY, then @@ -14180,7 +14568,8 @@ type HealthCheckService struct { // NetworkEndpointGroups: A list of URLs to the NetworkEndpointGroup // resources. Must not have more than 100. For regional // HealthCheckService, NEGs must be in zones in the region of the - // HealthCheckService. + // HealthCheckService. For global HealthCheckServices, the + // NetworkEndpointGroups must be global INTERNET_IP_PORT. NetworkEndpointGroups []string `json:"networkEndpointGroups,omitempty"` // NotificationEndpoints: A list of URLs to the NotificationEndpoint @@ -15282,8 +15671,8 @@ type HttpHeaderMatch struct { // in the HTTP request, use a headerMatch with headerName set to PORT // and a regular expression that satisfies the RFC2616 Host header's // port specifier. Only one of exactMatch, prefixMatch, suffixMatch, - // regexMatch, presentMatch or rangeMatch must be set. regexMatch only - // applies to load balancers that have loadBalancingScheme set to + // regexMatch, presentMatch or rangeMatch must be set. Regular + // expressions can only be used when the loadBalancingScheme is set to // INTERNAL_SELF_MANAGED. RegexMatch string `json:"regexMatch,omitempty"` @@ -15404,6 +15793,7 @@ type HttpHealthCheck struct { // RequestPath: The request path of the HTTP health check request. The // default value is /. This field does not support query parameters. + // Must comply with RFC3986. RequestPath string `json:"requestPath,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -15658,8 +16048,8 @@ type HttpQueryParameterMatch struct { // RegexMatch: The queryParameterMatch matches if the value of the // parameter matches the regular expression specified by regexMatch. For // more information about regular expression syntax, see Syntax. Only - // one of presentMatch, exactMatch, or regexMatch must be set. - // regexMatch only applies when the loadBalancingScheme is set to + // one of presentMatch, exactMatch, or regexMatch must be set. Regular + // expressions can only be used when the loadBalancingScheme is set to // INTERNAL_SELF_MANAGED. RegexMatch string `json:"regexMatch,omitempty"` @@ -15974,9 +16364,9 @@ type HttpRouteRule struct { // backend. If routeAction specifies any weightedBackendServices, // service must not be set. Conversely if service is set, routeAction // cannot contain any weightedBackendServices. Only one of urlRedirect, - // service or routeAction.weightedBackendService must be set. 
UrlMaps - // for external HTTP(S) load balancers support only the urlRewrite - // action within a route rule's routeAction. + // service or routeAction.weightedBackendService must be set. URL maps + // for Classic external HTTP(S) load balancers only support the + // urlRewrite action within a route rule's routeAction. RouteAction *HttpRouteAction `json:"routeAction,omitempty"` // Service: The full or partial URL of the backend service resource to @@ -16077,8 +16467,8 @@ type HttpRouteRuleMatch struct { // after removing any query parameters and anchor supplied with the // original URL. For more information about regular expression syntax, // see Syntax. Only one of prefixMatch, fullPathMatch or regexMatch must - // be specified. regexMatch only applies to load balancers that have - // loadBalancingScheme set to INTERNAL_SELF_MANAGED. + // be specified. Regular expressions can only be used when the + // loadBalancingScheme is set to INTERNAL_SELF_MANAGED. RegexMatch string `json:"regexMatch,omitempty"` // ForceSendFields is a list of field names (e.g. "FullPathMatch") to @@ -16154,7 +16544,7 @@ type HttpsHealthCheck struct { Port int64 `json:"port,omitempty"` // RequestPath: The request path of the HTTPS health check request. The - // default value is "/". + // default value is "/". Must comply with RFC3986. RequestPath string `json:"requestPath,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -16419,11 +16809,14 @@ type Image struct { // (in GB). DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` - // Family: The name of the image family to which this image belongs. You - // can create disks by specifying an image family instead of a specific - // image name. The image family always returns its latest image that is - // not deprecated. The name of the image family must comply with - // RFC1035. + // Family: The name of the image family to which this image belongs. The + // image family name can be from a publicly managed image family + // provided by Compute Engine, or from a custom image family you create. + // For example, centos-stream-9 is a publicly available image family. + // For more information, see Image family best practices. When creating + // disks, you can specify an image family instead of a specific image + // name. The image family always returns its latest image that is not + // deprecated. The name of the image family must comply with RFC1035. Family string `json:"family,omitempty"` // GuestOsFeatures: A list of features to enable on the guest operating @@ -17083,6 +17476,11 @@ type Instance struct { // ResourcePolicies: Resource policies applied to this instance. ResourcePolicies []string `json:"resourcePolicies,omitempty"` + // ResourceStatus: [Output Only] Specifies values set for instance + // attributes as compared to the values requested by user in the + // corresponding input only field. + ResourceStatus *ResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -17377,6 +17775,77 @@ func (s *InstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstanceConsumptionData struct { + // ConsumptionInfo: Resources consumed by the instance. + ConsumptionInfo *InstanceConsumptionInfo `json:"consumptionInfo,omitempty"` + + // Instance: Server-defined URL for the instance. 
+ Instance string `json:"instance,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConsumptionInfo") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsumptionInfo") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstanceConsumptionData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceConsumptionData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceConsumptionInfo struct { + // GuestCpus: The number of virtual CPUs that are available to the + // instance. + GuestCpus int64 `json:"guestCpus,omitempty"` + + // LocalSsdGb: The amount of local SSD storage available to the + // instance, defined in GiB. + LocalSsdGb int64 `json:"localSsdGb,omitempty"` + + // MemoryMb: The amount of physical memory available to the instance, + // defined in MiB. + MemoryMb int64 `json:"memoryMb,omitempty"` + + // MinNodeCpus: The minimal guaranteed number of virtual CPUs that are + // reserved. + MinNodeCpus int64 `json:"minNodeCpus,omitempty"` + + // ForceSendFields is a list of field names (e.g. "GuestCpus") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "GuestCpus") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceConsumptionInfo) MarshalJSON() ([]byte, error) { + type NoMethod InstanceConsumptionInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceGroup: Represents an Instance Group resource. Instance Groups // can be used to configure a target for load balancing. Instance groups // can either be managed or unmanaged. To create managed instance @@ -17919,6 +18388,19 @@ type InstanceGroupManager struct { // compute#instanceGroupManager for managed instance groups. Kind string `json:"kind,omitempty"` + // ListManagedInstancesResults: Pagination behavior of the + // listManagedInstances API method for this managed instance group. 
+ // + // Possible values: + // "PAGELESS" - (Default) Pagination is disabled for the group's + // listManagedInstances API method. maxResults and pageToken query + // parameters are ignored and all instances are returned in a single + // response. + // "PAGINATED" - Pagination is enabled for the group's + // listManagedInstances API method. maxResults and pageToken query + // parameters are respected. + ListManagedInstancesResults string `json:"listManagedInstancesResults,omitempty"` + // Name: The name of the managed instance group. The name must be 1-63 // characters long, and comply with RFC1035. Name string `json:"name,omitempty"` @@ -20920,6 +21402,10 @@ type InstanceTemplate struct { // Properties: The instance properties for this instance template. Properties *InstanceProperties `json:"properties,omitempty"` + // Region: [Output Only] URL of the region where the instance template + // resides. Only applicable for regional resources. + Region string `json:"region,omitempty"` + // SelfLink: [Output Only] The URL for this instance template. The // server defines this URL. SelfLink string `json:"selfLink,omitempty"` @@ -20963,6 +21449,196 @@ func (s *InstanceTemplate) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceTemplateAggregatedList: Contains a list of +// InstanceTemplatesScopedList. +type InstanceTemplateAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of InstanceTemplatesScopedList resources. + Items map[string]InstanceTemplatesScopedList `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *InstanceTemplateAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstanceTemplateAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod InstanceTemplateAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceTemplateAggregatedListWarning: [Output Only] Informational +// warning message. +type InstanceTemplateAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. 
+ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*InstanceTemplateAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplateAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InstanceTemplateAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceTemplateAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstanceTemplateAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceTemplateAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceTemplateList: A list of instance templates. type InstanceTemplateList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -21153,6 +21829,176 @@ func (s *InstanceTemplateListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstanceTemplatesScopedList struct { + // InstanceTemplates: [Output Only] A list of instance templates that + // are contained within the specified project and zone. + InstanceTemplates []*InstanceTemplate `json:"instanceTemplates,omitempty"` + + // Warning: [Output Only] An informational warning that replaces the + // list of instance templates when the list is empty. + Warning *InstanceTemplatesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceTemplates") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceTemplates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplatesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod InstanceTemplatesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceTemplatesScopedListWarning: [Output Only] An informational +// warning that replaces the list of instance templates when the list is +// empty. +type InstanceTemplatesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. 
+ // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*InstanceTemplatesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstanceTemplatesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InstanceTemplatesScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceTemplatesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplatesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceTemplatesScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceWithNamedPorts struct { // Instance: [Output Only] The URL of the instance. Instance string `json:"instance,omitempty"` @@ -21628,6 +22474,39 @@ func (s *InstancesSetMinCpuPlatformRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstancesSetNameRequest struct { + // CurrentName: The current name of this resource, used to prevent + // conflicts. Provide the latest name when making a request to change + // name. + CurrentName string `json:"currentName,omitempty"` + + // Name: The name to be applied to the instance. Needs to be RFC 1035 + // compliant. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CurrentName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CurrentName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstancesSetNameRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesSetNameRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstancesSetServiceAccountRequest struct { // Email: Email address of the service account. Email string `json:"email,omitempty"` @@ -21820,8 +22699,9 @@ type Interconnect struct { // NocContactEmail: Email address to contact the customer NOC for // operations and maintenance notifications regarding this Interconnect. // If specified, this will be used for notifications in addition to all - // other forms described, such as Stackdriver logs alerting and Cloud - // Notifications. + // other forms described, such as Cloud Monitoring logs alerting and + // Cloud Notifications. This field is required for users who sign up for + // Cloud Interconnect using workforce identity federation. NocContactEmail string `json:"nocContactEmail,omitempty"` // OperationalStatus: [Output Only] The current status of this @@ -21855,9 +22735,7 @@ type Interconnect struct { // bundle, as requested by the customer. RequestedLinkCount int64 `json:"requestedLinkCount,omitempty"` - // SatisfiesPzs: [Output Only] Set to true if the resource satisfies the - // zone separation organization policy constraints and false otherwise. - // Defaults to false if the field is not present. + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -22019,16 +22897,16 @@ type InterconnectAttachment struct { // - The VLAN attachment carries only encrypted traffic that is // encrypted by an IPsec device, such as an HA VPN gateway or // third-party IPsec VPN. VMs cannot directly send traffic to, or - // receive traffic from, such a VLAN attachment. To use *IPsec-encrypted + // receive traffic from, such a VLAN attachment. To use *HA VPN over // Cloud Interconnect*, the VLAN attachment must be created with this - // option. Not currently available publicly. + // option. // // Possible values: // "IPSEC" - The interconnect attachment will carry only encrypted // traffic that is encrypted by an IPsec device such as HA VPN gateway; // VMs cannot directly send traffic to or receive traffic from such an - // interconnect attachment. To use IPsec-encrypted Cloud Interconnect, - // the interconnect attachment must be created with this option. + // interconnect attachment. To use HA VPN over Cloud Interconnect, the + // interconnect attachment must be created with this option. // "NONE" - This is the default value, which means the Interconnect // Attachment will carry unencrypted traffic. VMs will be able to send // traffic to or receive traffic from such interconnect attachment. @@ -22129,9 +23007,7 @@ type InterconnectAttachment struct { // the network & region within which the Cloud Router is configured. Router string `json:"router,omitempty"` - // SatisfiesPzs: [Output Only] Set to true if the resource satisfies the - // zone separation organization policy constraints and false otherwise. - // Defaults to false if the field is not present. + // SatisfiesPzs: [Output Only] Reserved for future use. 
SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -22203,7 +23079,7 @@ type InterconnectAttachment struct { Type string `json:"type,omitempty"` // VlanTag8021q: The IEEE 802.1Q VLAN tag for this attachment, in the - // range 2-4094. Only specified at creation time. + // range 2-4093. Only specified at creation time. VlanTag8021q int64 `json:"vlanTag8021q,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -22914,6 +23790,27 @@ type InterconnectDiagnostics struct { // Interconnect is not bundled. ArpCaches []*InterconnectDiagnosticsARPEntry `json:"arpCaches,omitempty"` + // BundleAggregationType: The aggregation type of the bundle interface. + // + // Possible values: + // "BUNDLE_AGGREGATION_TYPE_LACP" - LACP is enabled. + // "BUNDLE_AGGREGATION_TYPE_STATIC" - LACP is disabled. + BundleAggregationType string `json:"bundleAggregationType,omitempty"` + + // BundleOperationalStatus: The operational status of the bundle + // interface. + // + // Possible values: + // "BUNDLE_OPERATIONAL_STATUS_DOWN" - If bundleAggregationType is + // LACP: LACP is not established and/or all links in the bundle have + // DOWN operational status. If bundleAggregationType is STATIC: one or + // more links in the bundle has DOWN operational status. + // "BUNDLE_OPERATIONAL_STATUS_UP" - If bundleAggregationType is LACP: + // LACP is established and at least one link in the bundle has UP + // operational status. If bundleAggregationType is STATIC: all links in + // the bundle (typically just one) have UP operational status. + BundleOperationalStatus string `json:"bundleOperationalStatus,omitempty"` + // Links: A list of InterconnectDiagnostics.LinkStatus objects, // describing the status for each link on the Interconnect. Links []*InterconnectDiagnosticsLinkStatus `json:"links,omitempty"` @@ -23103,6 +24000,15 @@ type InterconnectDiagnosticsLinkStatus struct { LacpStatus *InterconnectDiagnosticsLinkLACPStatus `json:"lacpStatus,omitempty"` + // OperationalStatus: The operational status of the link. + // + // Possible values: + // "LINK_OPERATIONAL_STATUS_DOWN" - The interface is unable to + // communicate with the remote end. + // "LINK_OPERATIONAL_STATUS_UP" - The interface has low level + // communication with the remote end. + OperationalStatus string `json:"operationalStatus,omitempty"` + // ReceivingOpticalPower: An InterconnectDiagnostics.LinkOpticalPower // object, describing the current value and status of the received light // level. @@ -23414,9 +24320,7 @@ type InterconnectLocation struct { // Interconnects. Status string `json:"status,omitempty"` - // SupportsPzs: [Output Only] Set to true for locations that support - // physical zone separation. Defaults to false if the field is not - // present. + // SupportsPzs: [Output Only] Reserved for future use. SupportsPzs bool `json:"supportsPzs,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -24291,7 +25195,7 @@ func (s *LocalDisk) MarshalJSON() ([]byte, error) { // return to the user which can be attached to an RPC error. type LocalizedMessage struct { // Locale: The locale used following the specification defined at - // http://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", + // https://www.rfc-editor.org/rfc/bcp/bcp47.txt. 
Examples are: "en-US", // "fr-CH", "es-MX" Locale string `json:"locale,omitempty"` @@ -25923,6 +26827,8 @@ type ManagedInstanceLastAttemptErrorsErrorsErrorDetails struct { LocalizedMessage *LocalizedMessage `json:"localizedMessage,omitempty"` + QuotaInfo *QuotaExceededInfo `json:"quotaInfo,omitempty"` + // ForceSendFields is a list of field names (e.g. "ErrorInfo") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -26245,8 +27151,9 @@ type Network struct { Kind string `json:"kind,omitempty"` // Mtu: Maximum Transmission Unit in bytes. The minimum value for this - // field is 1460 and the maximum value is 1500 bytes. If unspecified, - // defaults to 1460. + // field is 1300 and the maximum value is 8896. The suggested value is + // 1500, which is the default MTU used on the Internet, or 8896 if you + // want to use Jumbo frames. If unspecified, the value defaults to 1460. Mtu int64 `json:"mtu,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -26314,9 +27221,19 @@ func (s *Network) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkEdgeSecurityService: Represents a Google Cloud Armor network -// edge security service resource. -type NetworkEdgeSecurityService struct { +// NetworkAttachment: NetworkAttachments A network attachment resource +// ... +type NetworkAttachment struct { + // ConnectionEndpoints: [Output Only] An array of connections for all + // the producers connected to this network attachment. + ConnectionEndpoints []*NetworkAttachmentConnectedEndpoint `json:"connectionEndpoints,omitempty"` + + // Possible values: + // "ACCEPT_AUTOMATIC" + // "ACCEPT_MANUAL" + // "INVALID" + ConnectionPreference string `json:"connectionPreference,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -26325,21 +27242,17 @@ type NetworkEdgeSecurityService struct { // property when you create the resource. Description string `json:"description,omitempty"` - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a NetworkEdgeSecurityService. An - // up-to-date fingerprint must be provided in order to update the - // NetworkEdgeSecurityService, otherwise the request will fail with - // error 412 conditionNotMet. To see the latest fingerprint, make a - // get() request to retrieve a NetworkEdgeSecurityService. + // Fingerprint: [Output Only] Fingerprint of this resource. A hash of + // the contents stored in this object. This field is used in optimistic + // locking. An up-to-date fingerprint must be provided in order to + // patch. Fingerprint string `json:"fingerprint,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. + // Id: [Output Only] The unique identifier for the resource type. The + // server generates this identifier. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output only] Type of the resource. Always - // compute#networkEdgeSecurityService for NetworkEdgeSecurityServices + // Kind: [Output Only] Type of the resource. Kind string `json:"kind,omitempty"` // Name: Name of the resource. 
Provided by the client when the resource @@ -26351,27 +27264,43 @@ type NetworkEdgeSecurityService struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Region: [Output Only] URL of the region where the resource resides. - // You must specify this field as part of the HTTP request URL. It is - // not settable as a field in the request body. - Region string `json:"region,omitempty"` + // Network: [Output Only] The URL of the network which the Network + // Attachment belongs to. + Network string `json:"network,omitempty"` - // SecurityPolicy: The resource URL for the network edge security - // service associated with this network edge security service. - SecurityPolicy string `json:"securityPolicy,omitempty"` + // ProducerAcceptLists: Projects that are allowed to connect to this + // network attachment. The project can be specified using its id or + // number. + ProducerAcceptLists []string `json:"producerAcceptLists,omitempty"` + + // ProducerRejectLists: Projects that are not allowed to connect to this + // network attachment. The project can be specified using its id or + // number. + ProducerRejectLists []string `json:"producerRejectLists,omitempty"` + + // Region: [Output Only] URL of the region where the network attachment + // resides. This field applies only to the region resource. You must + // specify this field as part of the HTTP request URL. It is not + // settable as a field in the request body. + Region string `json:"region,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SelfLinkWithId: [Output Only] Server-defined URL for this resource - // with the resource id. + // SelfLinkWithId: [Output Only] Server-defined URL for this resource's + // resource id. SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + // Subnetworks: An array of URLs where each entry is the URL of a subnet + // provided by the service consumer to use for endpoints in the + // producers that connect to this network attachment. + Subnetworks []string `json:"subnetworks,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // ForceSendFields is a list of field names (e.g. "ConnectionEndpoints") // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -26379,7 +27308,7 @@ type NetworkEdgeSecurityService struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "ConnectionEndpoints") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
However, any field // with an empty value appearing in NullFields will be sent to the @@ -26389,25 +27318,22 @@ type NetworkEdgeSecurityService struct { NullFields []string `json:"-"` } -func (s *NetworkEdgeSecurityService) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEdgeSecurityService +func (s *NetworkAttachment) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachment raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworkEdgeSecurityServiceAggregatedList struct { - Etag string `json:"etag,omitempty"` - +// NetworkAttachmentAggregatedList: Contains a list of +// NetworkAttachmentsScopedList. +type NetworkAttachmentAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of NetworkEdgeSecurityServicesScopedList resources. - Items map[string]NetworkEdgeSecurityServicesScopedList `json:"items,omitempty"` + // Items: A list of NetworkAttachmentsScopedList resources. + Items map[string]NetworkAttachmentsScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#networkEdgeSecurityServiceAggregatedList for lists of Network - // Edge Security Services. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -26421,17 +27347,14 @@ type NetworkEdgeSecurityServiceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *NetworkEdgeSecurityServiceAggregatedListWarning `json:"warning,omitempty"` + Warning *NetworkAttachmentAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Etag") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -26439,7 +27362,7 @@ type NetworkEdgeSecurityServiceAggregatedList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Etag") to include in API + // NullFields is a list of field names (e.g. "Id") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -26448,15 +27371,704 @@ type NetworkEdgeSecurityServiceAggregatedList struct { NullFields []string `json:"-"` } -func (s *NetworkEdgeSecurityServiceAggregatedList) MarshalJSON() ([]byte, error) { - type NoMethod NetworkEdgeSecurityServiceAggregatedList +func (s *NetworkAttachmentAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkEdgeSecurityServiceAggregatedListWarning: [Output Only] -// Informational warning message. 
-type NetworkEdgeSecurityServiceAggregatedListWarning struct { +// NetworkAttachmentAggregatedListWarning: [Output Only] Informational +// warning message. +type NetworkAttachmentAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. 
+ Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*NetworkAttachmentAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkAttachmentAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkAttachmentConnectedEndpoint: [Output Only] A connection +// connected to this network attachment. 
+type NetworkAttachmentConnectedEndpoint struct { + // IpAddress: The IP address assigned to the producer instance network + // interface. This value will be a range in case of Serverless. + IpAddress string `json:"ipAddress,omitempty"` + + // ProjectIdOrNum: The project id or number of the interface to which + // the IP was assigned. + ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` + + // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork + SecondaryIpCidrRanges []string `json:"secondaryIpCidrRanges,omitempty"` + + // Status: The status of a connected endpoint to this network + // attachment. + // + // Possible values: + // "ACCEPTED" - The consumer allows traffic from the producer to reach + // its VPC. + // "CLOSED" - The consumer network attachment no longer exists. + // "NEEDS_ATTENTION" - The consumer needs to take further action + // before traffic can be served. + // "PENDING" - The consumer neither allows nor prohibits traffic from + // the producer to reach its VPC. + // "REJECTED" - The consumer prohibits traffic from the producer to + // reach its VPC. + // "STATUS_UNSPECIFIED" + Status string `json:"status,omitempty"` + + // Subnetwork: The subnetwork used to assign the IP to the producer + // instance network interface. + Subnetwork string `json:"subnetwork,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpAddress") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpAddress") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkAttachmentConnectedEndpoint) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentConnectedEndpoint + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkAttachmentList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of NetworkAttachment resources. + Items []*NetworkAttachment `json:"items,omitempty"` + + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *NetworkAttachmentListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkAttachmentList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkAttachmentListWarning: [Output Only] Informational warning +// message. +type NetworkAttachmentListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. 
+ // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*NetworkAttachmentListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkAttachmentListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkAttachmentListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkAttachmentListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkAttachmentsScopedList struct { + // NetworkAttachments: A list of NetworkAttachments contained in this + // scope. + NetworkAttachments []*NetworkAttachment `json:"networkAttachments,omitempty"` + + // Warning: Informational warning which replaces the list of network + // attachments when the list is empty. + Warning *NetworkAttachmentsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkAttachments") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NetworkAttachments") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NetworkAttachmentsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentsScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkAttachmentsScopedListWarning: Informational warning which +// replaces the list of network attachments when the list is empty. +type NetworkAttachmentsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. 
+ // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*NetworkAttachmentsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentsScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkAttachmentsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkAttachmentsScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkEdgeSecurityService: Represents a Google Cloud Armor network +// edge security service resource. +type NetworkEdgeSecurityService struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a NetworkEdgeSecurityService. An + // up-to-date fingerprint must be provided in order to update the + // NetworkEdgeSecurityService, otherwise the request will fail with + // error 412 conditionNotMet. 
To see the latest fingerprint, make a + // get() request to retrieve a NetworkEdgeSecurityService. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output only] Type of the resource. Always + // compute#networkEdgeSecurityService for NetworkEdgeSecurityServices + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Region: [Output Only] URL of the region where the resource resides. + // You must specify this field as part of the HTTP request URL. It is + // not settable as a field in the request body. + Region string `json:"region,omitempty"` + + // SecurityPolicy: The resource URL for the network edge security + // service associated with this network edge security service. + SecurityPolicy string `json:"securityPolicy,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SelfLinkWithId: [Output Only] Server-defined URL for this resource + // with the resource id. + SelfLinkWithId string `json:"selfLinkWithId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEdgeSecurityService) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityService + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkEdgeSecurityServiceAggregatedList struct { + Etag string `json:"etag,omitempty"` + + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of NetworkEdgeSecurityServicesScopedList resources. + Items map[string]NetworkEdgeSecurityServicesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#networkEdgeSecurityServiceAggregatedList for lists of Network + // Edge Security Services. 
+ Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *NetworkEdgeSecurityServiceAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEdgeSecurityServiceAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEdgeSecurityServiceAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NetworkEdgeSecurityServiceAggregatedListWarning: [Output Only] +// Informational warning message. +type NetworkEdgeSecurityServiceAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -26890,6 +28502,8 @@ type NetworkEndpointGroup struct { // serverless infrastructure. NetworkEndpointType string `json:"networkEndpointType,omitempty"` + PscData *NetworkEndpointGroupPscData `json:"pscData,omitempty"` + // PscTargetService: The target service url used to set up private // service connection to a Google API or a PSC Producer Service // Attachment. An example value is: @@ -27467,6 +29081,57 @@ func (s *NetworkEndpointGroupListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NetworkEndpointGroupPscData: All data that is specifically relevant +// to only network endpoint groups of type PRIVATE_SERVICE_CONNECT. +type NetworkEndpointGroupPscData struct { + // ConsumerPscAddress: [Output Only] Address allocated from given + // subnetwork for PSC. This IP address acts as a VIP for a PSC NEG, + // allowing it to act as an endpoint in L7 PSC-XLB. 
+ ConsumerPscAddress string `json:"consumerPscAddress,omitempty"` + + // PscConnectionId: [Output Only] The PSC connection id of the PSC + // Network Endpoint Group Consumer. + PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` + + // PscConnectionStatus: [Output Only] The connection status of the PSC + // Forwarding Rule. + // + // Possible values: + // "ACCEPTED" - The connection has been accepted by the producer. + // "CLOSED" - The connection has been closed by the producer and will + // not serve traffic going forward. + // "NEEDS_ATTENTION" - The connection has been accepted by the + // producer, but the producer needs to take further action before the + // forwarding rule can serve traffic. + // "PENDING" - The connection is pending acceptance by the producer. + // "REJECTED" - The connection has been rejected by the producer. + // "STATUS_UNSPECIFIED" + PscConnectionStatus string `json:"pscConnectionStatus,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ConsumerPscAddress") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsumerPscAddress") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NetworkEndpointGroupPscData) MarshalJSON() ([]byte, error) { + type NoMethod NetworkEndpointGroupPscData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type NetworkEndpointGroupsAttachEndpointsRequest struct { // NetworkEndpoints: The list of network endpoints to be attached. NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` @@ -27989,7 +29654,10 @@ type NetworkInterface struct { Ipv6AccessType string `json:"ipv6AccessType,omitempty"` // Ipv6Address: An IPv6 internal network address for this network - // interface. + // interface. To use a static internal IP address, it must be unused and + // in the same region as the instance's zone. If not specified, Google + // Cloud will automatically assign an internal IPv6 address from the + // instance's subnetwork. Ipv6Address string `json:"ipv6Address,omitempty"` // Kind: [Output Only] Type of the resource. Always @@ -28015,6 +29683,12 @@ type NetworkInterface struct { // global/networks/default Network string `json:"network,omitempty"` + // NetworkAttachment: The URL of the network attachment that this + // interface should connect to in the following format: + // projects/{project_number}/regions/{region_name}/networkAttachments/{ne + // twork_attachment_name}. + NetworkAttachment string `json:"networkAttachment,omitempty"` + // NetworkIP: An IPv4 internal IP address to assign to the instance for // this network interface. If not specified by the user, an unused // internal IP is assigned by the system. 
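The hunks above add the NetworkAttachment resource types and a networkAttachment field on NetworkInterface, and every struct carries the generated ForceSendFields/NullFields members that control how empty values are serialized. The following is an illustrative sketch only, not part of this diff: it assumes the vendored package is google.golang.org/api/compute/v1, and the project number, region, and attachment name are placeholders.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// networkAttachment takes the URL format documented above:
	// projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}
	// The values below are made up for illustration.
	iface := &compute.NetworkInterface{
		NetworkAttachment: "projects/12345/regions/us-east1/networkAttachments/example-attachment",
	}

	// Per the generated comments, fields with empty values are normally
	// omitted from the request body; naming a field in ForceSendFields sends
	// it even when it holds its zero value, and NullFields sends it as JSON
	// null. This is how callers include or clear fields in Patch requests.
	iface.ForceSendFields = []string{"NetworkIP"}
	iface.NullFields = []string{"Ipv6Address"}

	b, err := iface.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}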
@@ -28702,6 +30376,9 @@ type NodeGroup struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // ShareSettings: Share-settings for the node group + ShareSettings *ShareSettings `json:"shareSettings,omitempty"` + // Size: [Output Only] The total number of nodes in the node group. Size int64 `json:"size,omitempty"` @@ -29209,6 +30886,9 @@ type NodeGroupNode struct { // Accelerators: Accelerators for this node. Accelerators []*AcceleratorConfig `json:"accelerators,omitempty"` + // ConsumedResources: Node resources that are reserved by all instances. + ConsumedResources *InstanceConsumptionInfo `json:"consumedResources,omitempty"` + // CpuOvercommitType: CPU overcommit. // // Possible values: @@ -29220,6 +30900,10 @@ type NodeGroupNode struct { // Disks: Local disk configurations. Disks []*LocalDisk `json:"disks,omitempty"` + // InstanceConsumptionData: Instance data that shows consumed resources + // on the node. + InstanceConsumptionData []*InstanceConsumptionData `json:"instanceConsumptionData,omitempty"` + // Instances: Instances scheduled on this node. Instances []string `json:"instances,omitempty"` @@ -29246,6 +30930,9 @@ type NodeGroupNode struct { // "REPAIRING" Status string `json:"status,omitempty"` + // TotalResources: Total amount of available resources on the node. + TotalResources *InstanceConsumptionInfo `json:"totalResources,omitempty"` + // ForceSendFields is a list of field names (e.g. "Accelerators") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -29761,11 +31448,7 @@ type NodeTemplate struct { // this template. NodeType string `json:"nodeType,omitempty"` - // NodeTypeFlexibility: The flexible properties of the desired node - // type. Node groups that use this node template will create nodes of a - // type that matches these properties. This field is mutually exclusive - // with the node_type property; you can only define one or the other, - // but not both. + // NodeTypeFlexibility: Do not use. Instead, use the node_type property. NodeTypeFlexibility *NodeTemplateNodeTypeFlexibility `json:"nodeTypeFlexibility,omitempty"` // Region: [Output Only] The name of the region where the node template @@ -31124,7 +32807,8 @@ type NotificationEndpointGrpcSettings struct { // ResendInterval: Optional. This field is used to configure how often // to send a full update of all non-healthy backends. If unspecified, // full updates are not sent. If specified, must be in the range between - // 600 seconds to 3600 seconds. Nanos are disallowed. + // 600 seconds to 3600 seconds. Nanos are disallowed. Can only be set + // for regional notification endpoints. ResendInterval *Duration `json:"resendInterval,omitempty"` // RetryDurationSec: How much time (in seconds) is spent attempting @@ -31568,6 +33252,8 @@ type OperationErrorErrorsErrorDetails struct { LocalizedMessage *LocalizedMessage `json:"localizedMessage,omitempty"` + QuotaInfo *QuotaExceededInfo `json:"quotaInfo,omitempty"` + // ForceSendFields is a list of field names (e.g. "ErrorInfo") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -32285,25 +33971,33 @@ type OutlierDetection struct { // ConsecutiveErrors: Number of errors before a host is ejected from the // connection pool. When the backend host is accessed over HTTP, a 5xx - // return code qualifies as an error. Defaults to 5. 
+ // return code qualifies as an error. Defaults to 5. Not supported when + // the backend service is referenced by a URL map that is bound to + // target gRPC proxy that has validateForProxyless field set to true. ConsecutiveErrors int64 `json:"consecutiveErrors,omitempty"` // ConsecutiveGatewayFailure: The number of consecutive gateway failures // (502, 503, 504 status or connection errors that are mapped to one of // those status codes) before a consecutive gateway failure ejection - // occurs. Defaults to 3. + // occurs. Defaults to 3. Not supported when the backend service is + // referenced by a URL map that is bound to target gRPC proxy that has + // validateForProxyless field set to true. ConsecutiveGatewayFailure int64 `json:"consecutiveGatewayFailure,omitempty"` // EnforcingConsecutiveErrors: The percentage chance that a host will be // actually ejected when an outlier status is detected through // consecutive 5xx. This setting can be used to disable ejection or to - // ramp it up slowly. Defaults to 0. + // ramp it up slowly. Defaults to 0. Not supported when the backend + // service is referenced by a URL map that is bound to target gRPC proxy + // that has validateForProxyless field set to true. EnforcingConsecutiveErrors int64 `json:"enforcingConsecutiveErrors,omitempty"` // EnforcingConsecutiveGatewayFailure: The percentage chance that a host // will be actually ejected when an outlier status is detected through // consecutive gateway failures. This setting can be used to disable - // ejection or to ramp it up slowly. Defaults to 100. + // ejection or to ramp it up slowly. Defaults to 100. Not supported when + // the backend service is referenced by a URL map that is bound to + // target gRPC proxy that has validateForProxyless field set to true. EnforcingConsecutiveGatewayFailure int64 `json:"enforcingConsecutiveGatewayFailure,omitempty"` // EnforcingSuccessRate: The percentage chance that a host will be @@ -33307,9 +35001,9 @@ type PathMatcher struct { // defaultRouteAction specifies any weightedBackendServices, // defaultService must not be set. Conversely if defaultService is set, // defaultRouteAction cannot contain any weightedBackendServices. Only - // one of defaultRouteAction or defaultUrlRedirect must be set. UrlMaps - // for external HTTP(S) load balancers support only the urlRewrite - // action within a path matcher's defaultRouteAction. + // one of defaultRouteAction or defaultUrlRedirect must be set. URL maps + // for Classic external HTTP(S) load balancers only support the + // urlRewrite action within a path matcher's defaultRouteAction. DefaultRouteAction *HttpRouteAction `json:"defaultRouteAction,omitempty"` // DefaultService: The full or partial URL to the BackendService @@ -33412,9 +35106,9 @@ type PathRule struct { // backend. If routeAction specifies any weightedBackendServices, // service must not be set. Conversely if service is set, routeAction // cannot contain any weightedBackendServices. Only one of routeAction - // or urlRedirect must be set. URL maps for external HTTP(S) load - // balancers support only the urlRewrite action within a path rule's - // routeAction. + // or urlRedirect must be set. URL maps for Classic external HTTP(S) + // load balancers only support the urlRewrite action within a path + // rule's routeAction. 
RouteAction *HttpRouteAction `json:"routeAction,omitempty"` // Service: The full or partial URL of the backend service resource to @@ -33809,6 +35503,16 @@ type Project struct { // the Google Cloud Storage bucket where they are stored. UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"` + // VmDnsSetting: [Output Only] Default internal DNS setting used by VMs + // running in this project. + // + // Possible values: + // "GLOBAL_DEFAULT" + // "UNSPECIFIED_VM_DNS_SETTING" + // "ZONAL_DEFAULT" + // "ZONAL_ONLY" + VmDnsSetting string `json:"vmDnsSetting,omitempty"` + // XpnProjectStatus: [Output Only] The role this project has in a shared // VPC configuration. Currently, only projects with the host role, which // is specified by the value HOST, are differentiated. @@ -35121,8 +36825,12 @@ type Quota struct { // "EXTERNAL_VPN_GATEWAYS" // "FIREWALLS" // "FORWARDING_RULES" + // "GLOBAL_EXTERNAL_MANAGED_BACKEND_SERVICES" // "GLOBAL_EXTERNAL_MANAGED_FORWARDING_RULES" + // "GLOBAL_EXTERNAL_PROXY_LB_BACKEND_SERVICES" // "GLOBAL_INTERNAL_ADDRESSES" + // "GLOBAL_INTERNAL_MANAGED_BACKEND_SERVICES" + // "GLOBAL_INTERNAL_TRAFFIC_DIRECTOR_BACKEND_SERVICES" // "GPUS_ALL_REGIONS" // "HEALTH_CHECKS" // "IMAGES" @@ -35149,6 +36857,7 @@ type Quota struct { // "N2D_CPUS" // "N2_CPUS" // "NETWORKS" + // "NETWORK_ATTACHMENTS" // "NETWORK_ENDPOINT_GROUPS" // "NETWORK_FIREWALL_POLICIES" // "NODE_GROUPS" @@ -35182,7 +36891,11 @@ type Quota struct { // "PUBLIC_ADVERTISED_PREFIXES" // "PUBLIC_DELEGATED_PREFIXES" // "REGIONAL_AUTOSCALERS" + // "REGIONAL_EXTERNAL_MANAGED_BACKEND_SERVICES" + // "REGIONAL_EXTERNAL_NETWORK_LB_BACKEND_SERVICES" // "REGIONAL_INSTANCE_GROUP_MANAGERS" + // "REGIONAL_INTERNAL_LB_BACKEND_SERVICES" + // "REGIONAL_INTERNAL_MANAGED_BACKEND_SERVICES" // "RESERVATIONS" // "RESOURCE_POLICIES" // "ROUTERS" @@ -35199,6 +36912,7 @@ type Quota struct { // "SSL_CERTIFICATES" // "STATIC_ADDRESSES" // "STATIC_BYOIP_ADDRESSES" + // "STATIC_EXTERNAL_IPV6_ADDRESS_RANGES" // "SUBNETWORKS" // "T2A_CPUS" // "T2D_CPUS" @@ -35261,6 +36975,59 @@ func (s *Quota) UnmarshalJSON(data []byte) error { return nil } +// QuotaExceededInfo: Additional details for quota exceeded error for +// resource quota. +type QuotaExceededInfo struct { + // Dimensions: The map holding related quota dimensions. + Dimensions map[string]string `json:"dimensions,omitempty"` + + // Limit: Current effective quota limit. The limit's unit depends on the + // quota type or metric. + Limit float64 `json:"limit,omitempty"` + + // LimitName: The name of the quota limit. + LimitName string `json:"limitName,omitempty"` + + // MetricName: The Compute Engine quota metric name. + MetricName string `json:"metricName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Dimensions") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Dimensions") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *QuotaExceededInfo) MarshalJSON() ([]byte, error) { + type NoMethod QuotaExceededInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *QuotaExceededInfo) UnmarshalJSON(data []byte) error { + type NoMethod QuotaExceededInfo + var s1 struct { + Limit gensupport.JSONFloat64 `json:"limit"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Limit = float64(s1.Limit) + return nil +} + // Reference: Represents a reference to a resource. type Reference struct { // Kind: [Output Only] Type of the resource. Always compute#reference @@ -37556,6 +39323,15 @@ type Reservation struct { // be a dash. Name string `json:"name,omitempty"` + // ResourcePolicies: Resource policies to be added to this reservation. + // The key is defined by user, and the value is resource policy url. + // This is to define placement policy with reservation. + ResourcePolicies map[string]string `json:"resourcePolicies,omitempty"` + + // ResourceStatus: [Output Only] Status information for Reservation + // resource. + ResourceStatus *AllocationResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -37563,7 +39339,10 @@ type Reservation struct { // resource. SelfLink string `json:"selfLink,omitempty"` - // ShareSettings: Share-settings for shared-reservation + // ShareSettings: Specify share-settings to create a shared reservation. + // This property is optional. For more information about the syntax and + // options for this field and its subfields, see the guide for creating + // a shared reservation. ShareSettings *ShareSettings `json:"shareSettings,omitempty"` // SpecificReservation: Reservation for instances with specific machine @@ -38261,7 +40040,7 @@ type ResourceCommitment struct { Amount int64 `json:"amount,omitempty,string"` // Type: Type of resource for which this commitment applies. Possible - // values are VCPU and MEMORY + // values are VCPU, MEMORY, LOCAL_SSD, and ACCELERATOR. // // Possible values: // "ACCELERATOR" @@ -38912,7 +40691,7 @@ type ResourcePolicyInstanceSchedulePolicy struct { // TimeZone: Specifies the time zone to be used in interpreting // Schedule.schedule. The value of this field must be a time zone name - // from the tz database: http://en.wikipedia.org/wiki/Tz_database. + // from the tz database: https://wikipedia.org/wiki/Tz_database. TimeZone string `json:"timeZone,omitempty"` // VmStartSchedule: Specifies the schedule for starting instances. @@ -39465,6 +41244,37 @@ func (s *ResourcePolicyWeeklyCycleDayOfWeek) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ResourceStatus: Contains output only fields. Use this sub-message for +// actual values set on Instance attributes as compared to the value +// requested by the user (intent) in their instance CRUD calls. +type ResourceStatus struct { + // PhysicalHost: [Output Only] An opaque ID of the host on which the VM + // is running. + PhysicalHost string `json:"physicalHost,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PhysicalHost") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PhysicalHost") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResourceStatus) MarshalJSON() ([]byte, error) { + type NoMethod ResourceStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Route: Represents a Route resource. A route defines a path from VM // instances in the VPC network to a specific destination. This // destination can be inside or outside the VPC network. For more @@ -40000,8 +41810,7 @@ type Router struct { Description string `json:"description,omitempty"` // EncryptedInterconnectRouter: Indicates if a router is dedicated for - // use with encrypted VLAN attachments (interconnectAttachments). Not - // currently available publicly. + // use with encrypted VLAN attachments (interconnectAttachments). EncryptedInterconnectRouter bool `json:"encryptedInterconnectRouter,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -40017,6 +41826,9 @@ type Router struct { // routers. Kind string `json:"kind,omitempty"` + // Md5AuthenticationKeys: Keys used for MD5 authentication. + Md5AuthenticationKeys []*RouterMd5AuthenticationKey `json:"md5AuthenticationKeys,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -40369,9 +42181,9 @@ type RouterBgpPeer struct { AdvertiseMode string `json:"advertiseMode,omitempty"` // AdvertisedGroups: User-specified list of prefix groups to advertise - // in custom mode, which can take one of the following options: - - // ALL_SUBNETS: Advertises all available subnets, including peer VPC - // subnets. - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. + // in custom mode, which currently supports the following option: - + // ALL_SUBNETS: Advertises all of the router's own VPC subnets. This + // excludes any routes learned for subnets that use VPC Network Peering. // Note that this field can only be populated if advertise_mode is // CUSTOM and overrides the list defined for the router (in the "bgp" // message). These groups are advertised in addition to any specified @@ -40444,6 +42256,11 @@ type RouterBgpPeer struct { // and managed by user. ManagementType string `json:"managementType,omitempty"` + // Md5AuthenticationKeyName: Present if MD5 authentication is enabled + // for the peering. Must be the name of one of the entries in the + // Router.md5_authentication_keys. The field must comply with RFC1035. + Md5AuthenticationKeyName string `json:"md5AuthenticationKeyName,omitempty"` + // Name: Name of this BGP peer. The name must be 1-63 characters long, // and comply with RFC1035. 
Specifically, the name must be 1-63 // characters long and match the regular expression @@ -40840,6 +42657,41 @@ func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type RouterMd5AuthenticationKey struct { + // Key: [Input only] Value of the key. For patch and update calls, it + // can be skipped to copy the value from the previous configuration. + // This is allowed if the key with the same name existed before the + // operation. Maximum length is 80 characters. Can only contain + // printable ASCII characters. + Key string `json:"key,omitempty"` + + // Name: Name used to identify the key. Must be unique within a router. + // Must be referenced by at least one bgpPeer. Must comply with RFC1035. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouterMd5AuthenticationKey) MarshalJSON() ([]byte, error) { + type NoMethod RouterMd5AuthenticationKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // RouterNat: Represents a Nat resource. It enables the VMs within the // specified subnetworks to access Internet without external IP // addresses. It specifies a list of subnetworks (and the ranges within) @@ -41206,12 +43058,23 @@ type RouterStatusBgpPeerStatus struct { BfdStatus *BfdStatus `json:"bfdStatus,omitempty"` + // EnableIpv6: Enable IPv6 traffic over BGP Peer. If not specified, it + // is disabled by default. + EnableIpv6 bool `json:"enableIpv6,omitempty"` + // IpAddress: IP address of the local BGP interface. IpAddress string `json:"ipAddress,omitempty"` + // Ipv6NexthopAddress: IPv6 address of the local BGP interface. + Ipv6NexthopAddress string `json:"ipv6NexthopAddress,omitempty"` + // LinkedVpnTunnel: URL of the VPN tunnel that this BGP peer controls. LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` + // Md5AuthEnabled: Informs whether MD5 authentication is enabled on this + // BGP peer. + Md5AuthEnabled bool `json:"md5AuthEnabled,omitempty"` + // Name: Name of this BGP peer. Unique within the Routers resource. Name string `json:"name,omitempty"` @@ -41221,6 +43084,9 @@ type RouterStatusBgpPeerStatus struct { // PeerIpAddress: IP address of the remote BGP interface. PeerIpAddress string `json:"peerIpAddress,omitempty"` + // PeerIpv6NexthopAddress: IPv6 address of the remote BGP interface. + PeerIpv6NexthopAddress string `json:"peerIpv6NexthopAddress,omitempty"` + // RouterApplianceInstance: [Output only] URI of the VM instance that is // used as third-party router appliances such as Next Gen Firewalls, // Virtual Routers, or Router Appliances. 
The VM instance is the peer @@ -41239,6 +43105,15 @@ type RouterStatusBgpPeerStatus struct { // "UP" Status string `json:"status,omitempty"` + // StatusReason: Indicates why particular status was returned. + // + // Possible values: + // "MD5_AUTH_INTERNAL_PROBLEM" - Indicates internal problems with + // configuration of MD5 authentication. This particular reason can only + // be returned when md5AuthEnabled is true and status is DOWN. + // "STATUS_REASON_UNSPECIFIED" + StatusReason string `json:"statusReason,omitempty"` + // Uptime: Time this session has been up. Format: 14 years, 51 weeks, 6 // days, 23 hours, 59 minutes, 59 seconds Uptime string `json:"uptime,omitempty"` @@ -41665,32 +43540,45 @@ func (s *Rule) MarshalJSON() ([]byte, error) { } type SSLHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 443. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, SSL health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. Not supported by + // target pools. The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. 
- // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ProxyHeader: Specifies the type of proxy header to append before @@ -41702,15 +43590,17 @@ type SSLHealthCheck struct { // "PROXY_V1" ProxyHeader string `json:"proxyHeader,omitempty"` - // Request: The application data to send once the SSL connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. + // Request: Instructs the health check prober to send this exact ASCII + // string, up to 1024 bytes in length, after establishing the TCP + // connection and SSL handshake. Request string `json:"request,omitempty"` - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. + // Response: Creates a content-based SSL health check. In addition to + // establishing a TCP connection and the TLS handshake, you can + // configure the health check to pass only when the backend sends this + // exact response ASCII string, up to 1024 bytes in length. For details, + // see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-ssl-tcp Response string `json:"response,omitempty"` // ForceSendFields is a list of field names (e.g. "Port") to @@ -42589,9 +44479,11 @@ type SecurityPolicy struct { Region string `json:"region,omitempty"` // Rules: A list of rules that belong to this policy. There must always - // be a default rule (rule with priority 2147483647 and match "*"). If - // no rules are provided when creating a security policy, a default rule - // with action "allow" will be added. + // be a default rule which is a rule with priority 2147483647 and match + // all condition (for the match condition this means match "*" for + // srcIpRanges and for the networkMatch condition every field must be + // either match "*" or not set). If no rules are provided when creating + // a security policy, a default rule with action "allow" will be added. Rules []*SecurityPolicyRule `json:"rules,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -42609,7 +44501,12 @@ type SecurityPolicy struct { // internal service policies can be configured to filter HTTP requests // targeting services managed by Traffic Director in a service mesh. // They filter requests before the request is served from the - // application. This field can be set only at resource creation time. + // application. 
- CLOUD_ARMOR_NETWORK: Cloud Armor network policies can + // be configured to filter packets targeting network load balancing + // resources such as backend services, target pools, target instances, + // and instances with external IPs. They filter requests before the + // request is served from the application. This field can be set only at + // resource creation time. // // Possible values: // "CLOUD_ARMOR" @@ -42716,6 +44613,10 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig) MarshalJ } type SecurityPolicyAdvancedOptionsConfig struct { + // JsonCustomConfig: Custom configuration to apply the JSON parsing. + // Only applicable when json_parsing is set to STANDARD. + JsonCustomConfig *SecurityPolicyAdvancedOptionsConfigJsonCustomConfig `json:"jsonCustomConfig,omitempty"` + // Possible values: // "DISABLED" // "STANDARD" @@ -42726,7 +44627,7 @@ type SecurityPolicyAdvancedOptionsConfig struct { // "VERBOSE" LogLevel string `json:"logLevel,omitempty"` - // ForceSendFields is a list of field names (e.g. "JsonParsing") to + // ForceSendFields is a list of field names (e.g. "JsonCustomConfig") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -42734,7 +44635,40 @@ type SecurityPolicyAdvancedOptionsConfig struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "JsonParsing") to include + // NullFields is a list of field names (e.g. "JsonCustomConfig") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyAdvancedOptionsConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdvancedOptionsConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SecurityPolicyAdvancedOptionsConfigJsonCustomConfig struct { + // ContentTypes: A list of custom Content-Type header values to apply + // the JSON parsing. As per RFC 1341, a Content-Type header value has + // the following format: Content-Type := type "/" subtype *[";" + // parameter] When configuring a custom Content-Type header value, only + // the type/subtype needs to be specified, and the parameters should be + // excluded. + ContentTypes []string `json:"contentTypes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ContentTypes") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ContentTypes") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. 
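The JsonCustomConfig addition above only takes effect when JSON parsing is set to STANDARD. A minimal sketch, assuming the vendored compute/v1 package and the JsonParsing field named in the generated docs; the content types are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	cfg := &compute.SecurityPolicyAdvancedOptionsConfig{
		JsonParsing: "STANDARD",
		// Custom Content-Type values to parse as JSON, given as type/subtype
		// only, without parameters such as "; charset=utf-8".
		JsonCustomConfig: &compute.SecurityPolicyAdvancedOptionsConfigJsonCustomConfig{
			ContentTypes: []string{"application/json", "application/vnd.api+json"},
		},
	}
	b, _ := json.Marshal(cfg)
	fmt.Println(string(b))
}
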
However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -42743,8 +44677,8 @@ type SecurityPolicyAdvancedOptionsConfig struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyAdvancedOptionsConfig) MarshalJSON() ([]byte, error) { - type NoMethod SecurityPolicyAdvancedOptionsConfig +func (s *SecurityPolicyAdvancedOptionsConfigJsonCustomConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdvancedOptionsConfigJsonCustomConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -43030,18 +44964,19 @@ func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { // matches this condition (allow or deny). type SecurityPolicyRule struct { // Action: The Action to perform when the rule is matched. The following - // are the valid actions: - allow: allow access to target. - deny(): - // deny access to target, returns the HTTP response code specified - // (valid values are 403, 404, and 502). - rate_based_ban: limit client - // traffic to the configured threshold and ban the client if the traffic - // exceeds the threshold. Configure parameters for this action in - // RateLimitOptions. Requires rate_limit_options to be set. - redirect: - // redirect to a different target. This can either be an internal - // reCAPTCHA redirect, or an external URL-based redirect via a 302 - // response. Parameters for this action can be configured via - // redirectOptions. - throttle: limit client traffic to the configured - // threshold. Configure parameters for this action in rateLimitOptions. - // Requires rate_limit_options to be set for this. + // are the valid actions: - allow: allow access to target. - + // deny(STATUS): deny access to target, returns the HTTP response code + // specified. Valid values for `STATUS` are 403, 404, and 502. - + // rate_based_ban: limit client traffic to the configured threshold and + // ban the client if the traffic exceeds the threshold. Configure + // parameters for this action in RateLimitOptions. Requires + // rate_limit_options to be set. - redirect: redirect to a different + // target. This can either be an internal reCAPTCHA redirect, or an + // external URL-based redirect via a 302 response. Parameters for this + // action can be configured via redirectOptions. - throttle: limit + // client traffic to the configured threshold. Configure parameters for + // this action in rateLimitOptions. Requires rate_limit_options to be + // set for this. Action string `json:"action,omitempty"` // Description: An optional description of this resource. Provide this @@ -43060,6 +44995,12 @@ type SecurityPolicyRule struct { // If it evaluates to true, the corresponding 'action' is enforced. Match *SecurityPolicyRuleMatcher `json:"match,omitempty"` + // PreconfiguredWafConfig: Preconfigured WAF configuration to be applied + // for the rule. If the rule does not evaluate preconfigured WAF rules, + // i.e., if evaluatePreconfiguredWaf() is not used, this field will have + // no effect. + PreconfiguredWafConfig *SecurityPolicyRulePreconfiguredWafConfig `json:"preconfiguredWafConfig,omitempty"` + // Preview: If set to true, the specified action is not enforced. 
Preview bool `json:"preview,omitempty"` @@ -43238,6 +45179,130 @@ func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SecurityPolicyRulePreconfiguredWafConfig struct { + // Exclusions: A list of exclusions to apply during preconfigured WAF + // evaluation. + Exclusions []*SecurityPolicyRulePreconfiguredWafConfigExclusion `json:"exclusions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Exclusions") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Exclusions") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRulePreconfiguredWafConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRulePreconfiguredWafConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SecurityPolicyRulePreconfiguredWafConfigExclusion struct { + // RequestCookiesToExclude: A list of request cookie names whose value + // will be excluded from inspection during preconfigured WAF evaluation. + RequestCookiesToExclude []*SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams `json:"requestCookiesToExclude,omitempty"` + + // RequestHeadersToExclude: A list of request header names whose value + // will be excluded from inspection during preconfigured WAF evaluation. + RequestHeadersToExclude []*SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams `json:"requestHeadersToExclude,omitempty"` + + // RequestQueryParamsToExclude: A list of request query parameter names + // whose value will be excluded from inspection during preconfigured WAF + // evaluation. Note that the parameter can be in the query string or in + // the POST body. + RequestQueryParamsToExclude []*SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams `json:"requestQueryParamsToExclude,omitempty"` + + // RequestUrisToExclude: A list of request URIs from the request line to + // be excluded from inspection during preconfigured WAF evaluation. When + // specifying this field, the query or fragment part should be excluded. + RequestUrisToExclude []*SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams `json:"requestUrisToExclude,omitempty"` + + // TargetRuleIds: A list of target rule IDs under the WAF rule set to + // apply the preconfigured WAF exclusion. If omitted, it refers to all + // the rule IDs under the WAF rule set. + TargetRuleIds []string `json:"targetRuleIds,omitempty"` + + // TargetRuleSet: Target WAF rule set to apply the preconfigured WAF + // exclusion. + TargetRuleSet string `json:"targetRuleSet,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "RequestCookiesToExclude") to unconditionally include in API + // requests. 
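A minimal sketch of a rule carrying the new preconfigured WAF config, assuming the vendored compute/v1 package; the Op/Val field parameters come from the FieldParams type generated further below, and the rule-set, rule-ID, and header names are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	rule := &compute.SecurityPolicyRule{
		Action:      "deny(403)",
		Description: "XSS rule with one request header excluded from inspection",
		// Only meaningful when the rule's expression uses evaluatePreconfiguredWaf().
		PreconfiguredWafConfig: &compute.SecurityPolicyRulePreconfiguredWafConfig{
			Exclusions: []*compute.SecurityPolicyRulePreconfiguredWafConfigExclusion{
				{
					TargetRuleSet: "xss-v33-stable",
					TargetRuleIds: []string{"owasp-crs-v030301-id941140-xss"},
					RequestHeadersToExclude: []*compute.SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams{
						{Op: "EQUALS", Val: "X-Known-Good-Header"},
					},
				},
			},
		},
	}
	b, _ := json.Marshal(rule)
	fmt.Println(string(b))
}
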
By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RequestCookiesToExclude") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRulePreconfiguredWafConfigExclusion) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRulePreconfiguredWafConfigExclusion + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams struct { + // Op: The match operator for the field. + // + // Possible values: + // "CONTAINS" - The operator matches if the field value contains the + // specified value. + // "ENDS_WITH" - The operator matches if the field value ends with the + // specified value. + // "EQUALS" - The operator matches if the field value equals the + // specified value. + // "EQUALS_ANY" - The operator matches if the field value is any + // value. + // "STARTS_WITH" - The operator matches if the field value starts with + // the specified value. + Op string `json:"op,omitempty"` + + // Val: The value of the field. + Val string `json:"val,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Op") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Op") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type SecurityPolicyRuleRateLimitOptions struct { // BanDurationSec: Can only be specified if the action for the rule is // "rate_based_ban". If specified, determines the time (in seconds) the @@ -43258,26 +45323,34 @@ type SecurityPolicyRuleRateLimitOptions struct { // EnforceOnKey: Determines the key to enforce the rate_limit_threshold // on. Possible values are: - ALL: A single rate limit threshold is // applied to all the requests matching this rule. This is the default - // value if this field 'enforce_on_key' is not configured. 
- IP: The - // source IP address of the request is the key. Each IP has this limit - // enforced separately. - HTTP_HEADER: The value of the HTTP header - // whose name is configured under "enforce_on_key_name". The key value - // is truncated to the first 128 bytes of the header value. If no such - // header is present in the request, the key type defaults to ALL. - - // XFF_IP: The first IP address (i.e. the originating client IP address) - // specified in the list of IPs under X-Forwarded-For HTTP header. If no - // such header is present or the value is not a valid IP, the key - // defaults to the source IP address of the request i.e. key type IP. - - // HTTP_COOKIE: The value of the HTTP cookie whose name is configured - // under "enforce_on_key_name". The key value is truncated to the first - // 128 bytes of the cookie value. If no such cookie is present in the - // request, the key type defaults to ALL. + // value if "enforceOnKey" is not configured. - IP: The source IP + // address of the request is the key. Each IP has this limit enforced + // separately. - HTTP_HEADER: The value of the HTTP header whose name is + // configured under "enforceOnKeyName". The key value is truncated to + // the first 128 bytes of the header value. If no such header is present + // in the request, the key type defaults to ALL. - XFF_IP: The first IP + // address (i.e. the originating client IP address) specified in the + // list of IPs under X-Forwarded-For HTTP header. If no such header is + // present or the value is not a valid IP, the key defaults to the + // source IP address of the request i.e. key type IP. - HTTP_COOKIE: The + // value of the HTTP cookie whose name is configured under + // "enforceOnKeyName". The key value is truncated to the first 128 bytes + // of the cookie value. If no such cookie is present in the request, the + // key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP + // request. The key value is truncated to the first 128 bytes. - SNI: + // Server name indication in the TLS session of the HTTPS request. The + // key value is truncated to the first 128 bytes. The key type defaults + // to ALL on a HTTP session. - REGION_CODE: The country/region from + // which the request originates. // // Possible values: // "ALL" // "HTTP_COOKIE" // "HTTP_HEADER" + // "HTTP_PATH" // "IP" + // "REGION_CODE" + // "SNI" // "XFF_IP" EnforceOnKey string `json:"enforceOnKey,omitempty"` @@ -43290,9 +45363,9 @@ type SecurityPolicyRuleRateLimitOptions struct { // ExceedAction: Action to take for requests that are above the // configured rate limit threshold, to either deny with a specified HTTP // response code, or redirect to a different endpoint. Valid options are - // "deny(status)", where valid values for status are 403, 404, 429, and - // 502, and "redirect" where the redirect parameters come from - // exceedRedirectOptions below. + // `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, + // and 502, and `redirect`, where the redirect parameters come from + // `exceedRedirectOptions` below. ExceedAction string `json:"exceedAction,omitempty"` // ExceedRedirectOptions: Parameters defining the redirect action that @@ -43935,6 +46008,9 @@ type ServiceAttachmentConsumerProjectLimit struct { // ConnectionLimit: The value of the limit to set. ConnectionLimit int64 `json:"connectionLimit,omitempty"` + // NetworkUrl: The network URL for the network to set the limit for. 
+ NetworkUrl string `json:"networkUrl,omitempty"` + // ProjectIdOrNum: The project id or number for the project to set the // limit for. ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` @@ -44332,6 +46408,7 @@ type ShareSettings struct { // // Possible values: // "LOCAL" - Default value. + // "ORGANIZATION" - Shared-reservation is open to entire Organization // "SHARE_TYPE_UNSPECIFIED" - Default value. This value is unused. // "SPECIFIC_PROJECTS" - Shared-reservation is open to specific // projects @@ -44997,8 +47074,8 @@ func (s *SourceDiskEncryptionKey) MarshalJSON() ([]byte, error) { // creating the instance template from a source instance. type SourceInstanceParams struct { // DiskConfigs: Attached disks configuration. If not provided, defaults - // are applied: For boot disk and any other R/W disks, new custom images - // will be created from each disk. For read-only disks, they will be + // are applied: For boot disk and any other R/W disks, the source images + // for each disk will be used. For read-only disks, they will be // attached in read-only mode. Local SSD disks will be created as blank // volumes. DiskConfigs []*DiskInstantiationConfig `json:"diskConfigs,omitempty"` @@ -45885,16 +47962,18 @@ func (s *SslCertificatesScopedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesList struct { +type SslPoliciesAggregatedList struct { + Etag string `json:"etag,omitempty"` + // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of SslPolicy resources. - Items []*SslPolicy `json:"items,omitempty"` + // Items: A list of SslPoliciesScopedList resources. + Items map[string]SslPoliciesScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#sslPoliciesList for lists of sslPolicies. + // Kind: [Output Only] Type of resource. Always + // compute#sslPolicyAggregatedList for lists of SSL Policies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -45908,14 +47987,17 @@ type SslPoliciesList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. - Warning *SslPoliciesListWarning `json:"warning,omitempty"` + Warning *SslPoliciesAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -45923,7 +48005,7 @@ type SslPoliciesList struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Etag") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -45932,14 +48014,15 @@ type SslPoliciesList struct { NullFields []string `json:"-"` } -func (s *SslPoliciesList) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesList +func (s *SslPoliciesAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslPoliciesListWarning: [Output Only] Informational warning message. -type SslPoliciesListWarning struct { +// SslPoliciesAggregatedListWarning: [Output Only] Informational warning +// message. +type SslPoliciesAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -46007,7 +48090,7 @@ type SslPoliciesListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*SslPoliciesListWarningData `json:"data,omitempty"` + Data []*SslPoliciesAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -46030,13 +48113,13 @@ type SslPoliciesListWarning struct { NullFields []string `json:"-"` } -func (s *SslPoliciesListWarning) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListWarning +func (s *SslPoliciesAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesListWarningData struct { +type SslPoliciesAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -46067,156 +48150,515 @@ type SslPoliciesListWarningData struct { NullFields []string `json:"-"` } -func (s *SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListWarningData +func (s *SslPoliciesAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPoliciesListAvailableFeaturesResponse struct { - Features []string `json:"features,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the +type SslPoliciesList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Features") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Features") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
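A minimal sketch of consuming the new aggregated-list shape, assuming the vendored compute/v1 package: Items maps a scope (for example a region) to an SslPoliciesScopedList, and Unreachables reports scopes that could not be queried.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printAggregated walks an SslPoliciesAggregatedList response.
func printAggregated(agg *compute.SslPoliciesAggregatedList) {
	for scope, scoped := range agg.Items {
		for _, p := range scoped.SslPolicies {
			fmt.Printf("%s: %s (profile %s)\n", scope, p.Name, p.Profile)
		}
	}
	for _, u := range agg.Unreachables {
		fmt.Println("unreachable scope:", u)
	}
}

func main() {
	printAggregated(&compute.SslPoliciesAggregatedList{
		Items: map[string]compute.SslPoliciesScopedList{
			"regions/us-east1": {SslPolicies: []*compute.SslPolicy{{Name: "example", Profile: "MODERN"}}},
		},
	})
}
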
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { - type NoMethod SslPoliciesListAvailableFeaturesResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SslPolicy: Represents an SSL Policy resource. Use SSL policies to -// control the SSL features, such as versions and cipher suites, offered -// by an HTTPS or SSL Proxy load balancer. For more information, read -// SSL Policy Concepts. -type SslPolicy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // CustomFeatures: A list of features enabled when the selected profile - // is CUSTOM. The method returns the set of features that can be - // specified in this list. This field must be empty if the profile is - // not CUSTOM. - CustomFeatures []string `json:"customFeatures,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // EnabledFeatures: [Output Only] The list of features enabled in the - // SSL policy. - EnabledFeatures []string `json:"enabledFeatures,omitempty"` - - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a SslPolicy. An up-to-date - // fingerprint must be provided in order to update the SslPolicy, - // otherwise the request will fail with error 412 conditionNotMet. To - // see the latest fingerprint, make a get() request to retrieve an - // SslPolicy. - Fingerprint string `json:"fingerprint,omitempty"` + Id string `json:"id,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // Items: A list of SslPolicy resources. + Items []*SslPolicy `json:"items,omitempty"` - // Kind: [Output only] Type of the resource. Always compute#sslPolicyfor - // SSL policies. + // Kind: [Output Only] Type of the resource. Always + // compute#sslPoliciesList for lists of sslPolicies. Kind string `json:"kind,omitempty"` - // MinTlsVersion: The minimum version of SSL protocol that can be used - // by the clients to establish a connection with the load balancer. This - // can be one of TLS_1_0, TLS_1_1, TLS_1_2. - // - // Possible values: - // "TLS_1_0" - TLS 1.0 - // "TLS_1_1" - TLS 1.1 - // "TLS_1_2" - TLS 1.2 - MinTlsVersion string `json:"minTlsVersion,omitempty"` - - // Name: Name of the resource. The name must be 1-63 characters long, - // and comply with RFC1035. Specifically, the name must be 1-63 - // characters long and match the regular expression - // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be - // a lowercase letter, and all following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` - - // Profile: Profile specifies the set of SSL features that can be used - // by the load balancer when negotiating SSL with clients. 
This can be - // one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, - // the set of SSL features to enable must be specified in the - // customFeatures field. - // - // Possible values: - // "COMPATIBLE" - Compatible profile. Allows the broadset set of - // clients, even those which support only out-of-date SSL features to - // negotiate with the load balancer. - // "CUSTOM" - Custom profile. Allow only the set of allowed SSL - // features specified in the customFeatures field. - // "MODERN" - Modern profile. Supports a wide set of SSL features, - // allowing modern clients to negotiate SSL with the load balancer. - // "RESTRICTED" - Restricted profile. Supports a reduced set of SSL - // features, intended to meet stricter compliance requirements. - Profile string `json:"profile,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. + // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Warnings: [Output Only] If potential misconfigurations are detected - // for this SSL policy, this field will be populated with warning - // messages. - Warnings []*SslPolicyWarnings `json:"warnings,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *SslPoliciesListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *SslPolicy) MarshalJSON() ([]byte, error) { - type NoMethod SslPolicy +func (s *SslPoliciesList) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SslPolicyWarnings struct { +// SslPoliciesListWarning: [Output Only] Informational warning message. +type SslPoliciesListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. 
+ // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*SslPoliciesListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPoliciesListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPoliciesListAvailableFeaturesResponse struct { + Features []string `json:"features,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Features") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Features") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesListAvailableFeaturesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPoliciesScopedList struct { + // SslPolicies: A list of SslPolicies contained in this scope. + SslPolicies []*SslPolicy `json:"sslPolicies,omitempty"` + + // Warning: Informational warning which replaces the list of SSL + // policies when the list is empty. + Warning *SslPoliciesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslPolicies") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslPolicies") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SslPoliciesScopedListWarning: Informational warning which replaces +// the list of SSL policies when the list is empty. +type SslPoliciesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*SslPoliciesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPoliciesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod SslPoliciesScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SslPolicy: Represents an SSL Policy resource. Use SSL policies to +// control the SSL features, such as versions and cipher suites, offered +// by an HTTPS or SSL Proxy load balancer. For more information, read +// SSL Policy Concepts. +type SslPolicy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // CustomFeatures: A list of features enabled when the selected profile + // is CUSTOM. The method returns the set of features that can be + // specified in this list. 
This field must be empty if the profile is + // not CUSTOM. + CustomFeatures []string `json:"customFeatures,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // EnabledFeatures: [Output Only] The list of features enabled in the + // SSL policy. + EnabledFeatures []string `json:"enabledFeatures,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a SslPolicy. An up-to-date + // fingerprint must be provided in order to update the SslPolicy, + // otherwise the request will fail with error 412 conditionNotMet. To + // see the latest fingerprint, make a get() request to retrieve an + // SslPolicy. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output only] Type of the resource. Always compute#sslPolicyfor + // SSL policies. + Kind string `json:"kind,omitempty"` + + // MinTlsVersion: The minimum version of SSL protocol that can be used + // by the clients to establish a connection with the load balancer. This + // can be one of TLS_1_0, TLS_1_1, TLS_1_2. + // + // Possible values: + // "TLS_1_0" - TLS 1.0 + // "TLS_1_1" - TLS 1.1 + // "TLS_1_2" - TLS 1.2 + MinTlsVersion string `json:"minTlsVersion,omitempty"` + + // Name: Name of the resource. The name must be 1-63 characters long, + // and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` + + // Profile: Profile specifies the set of SSL features that can be used + // by the load balancer when negotiating SSL with clients. This can be + // one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, + // the set of SSL features to enable must be specified in the + // customFeatures field. + // + // Possible values: + // "COMPATIBLE" - Compatible profile. Allows the broadset set of + // clients, even those which support only out-of-date SSL features to + // negotiate with the load balancer. + // "CUSTOM" - Custom profile. Allow only the set of allowed SSL + // features specified in the customFeatures field. + // "MODERN" - Modern profile. Supports a wide set of SSL features, + // allowing modern clients to negotiate SSL with the load balancer. + // "RESTRICTED" - Restricted profile. Supports a reduced set of SSL + // features, intended to meet stricter compliance requirements. + Profile string `json:"profile,omitempty"` + + // Region: [Output Only] URL of the region where the regional SSL policy + // resides. This field is not applicable to global SSL policies. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warnings: [Output Only] If potential misconfigurations are detected + // for this SSL policy, this field will be populated with warning + // messages. 
+ Warnings []*SslPolicyWarnings `json:"warnings,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SslPolicy) MarshalJSON() ([]byte, error) { + type NoMethod SslPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPolicyWarnings struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -46495,8 +48937,8 @@ type Subnetwork struct { // INTERNAL_HTTPS_LOAD_BALANCER. EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` - // ExternalIpv6Prefix: [Output Only] The external IPv6 address range - // that is assigned to this subnetwork. + // ExternalIpv6Prefix: The external IPv6 address range that is owned by + // this subnetwork. ExternalIpv6Prefix string `json:"externalIpv6Prefix,omitempty"` // Fingerprint: Fingerprint of this resource. A hash of the contents @@ -47465,32 +49907,45 @@ func (s *Subsetting) MarshalJSON() ([]byte, error) { } type TCPHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 80. Valid values are 1 through 65535. + // Port: The TCP port number to which the health check prober sends + // packets. The default value is 80. Valid values are 1 through 65535. Port int64 `json:"port,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. + // PortName: Not supported. PortName string `json:"portName,omitempty"` - // PortSpecification: Specifies how port is selected for health - // checking, can be one of following values: USE_FIXED_PORT: The port - // number in port is used for health checking. USE_NAMED_PORT: The - // portName is used for health checking. USE_SERVING_PORT: For - // NetworkEndpointGroup, the port specified for each network endpoint is - // used for health checking. For other backends, the port or named port - // specified in the Backend Service is used for health checking. If not - // specified, TCP health check follows behavior specified in port and - // portName fields. + // PortSpecification: Specifies how a port is selected for health + // checking. Can be one of the following values: USE_FIXED_PORT: + // Specifies a port number explicitly using the port field in the health + // check. Supported by backend services for pass-through load balancers + // and backend services for proxy load balancers. 
Not supported by + // target pools. The health check supports all backends supported by the + // backend service provided the backend can be health checked. For + // example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network + // endpoint groups, and instance group backends. USE_NAMED_PORT: Not + // supported. USE_SERVING_PORT: Provides an indirect method of + // specifying the health check port by referring to the backend service. + // Only supported by backend services for proxy load balancers. Not + // supported by target pools. Not supported by backend services for + // pass-through load balancers. Supports all backends that can be health + // checked; for example, GCE_VM_IP_PORT network endpoint groups and + // instance group backends. For GCE_VM_IP_PORT network endpoint group + // backends, the health check uses the port number specified for each + // endpoint in the network endpoint group. For instance group backends, + // the health check uses the port number determined by looking up the + // backend service's named port in the instance group's list of named + // ports. // // Possible values: - // "USE_FIXED_PORT" - The port number in port is used for health - // checking. - // "USE_NAMED_PORT" - The portName is used for health checking. - // "USE_SERVING_PORT" - For NetworkEndpointGroup, the port specified - // for each network endpoint is used for health checking. For other - // backends, the port or named port specified in the Backend Service is - // used for health checking. + // "USE_FIXED_PORT" - The port number in the health check's port is + // used for health checking. Applies to network endpoint group and + // instance group backends. + // "USE_NAMED_PORT" - Not supported. + // "USE_SERVING_PORT" - For network endpoint group backends, the + // health check uses the port number specified on each endpoint in the + // network endpoint group. For instance group backends, the health check + // uses the port number specified for the backend service's named port + // defined in the instance group's named ports. PortSpecification string `json:"portSpecification,omitempty"` // ProxyHeader: Specifies the type of proxy header to append before @@ -47502,15 +49957,16 @@ type TCPHealthCheck struct { // "PROXY_V1" ProxyHeader string `json:"proxyHeader,omitempty"` - // Request: The application data to send once the TCP connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. + // Request: Instructs the health check prober to send this exact ASCII + // string, up to 1024 bytes in length, after establishing the TCP + // connection. Request string `json:"request,omitempty"` - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. + // Response: Creates a content-based TCP health check. In addition to + // establishing a TCP connection, you can configure the health check to + // pass only when the backend sends this exact response ASCII string, up + // to 1024 bytes in length. For details, see: + // https://cloud.google.com/load-balancing/docs/health-check-concepts#criteria-protocol-ssl-tcp Response string `json:"response,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Port") to @@ -51089,6 +53545,174 @@ func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type TargetTcpProxiesScopedList struct { + // TargetTcpProxies: A list of TargetTcpProxies contained in this scope. + TargetTcpProxies []*TargetTcpProxy `json:"targetTcpProxies,omitempty"` + + // Warning: Informational warning which replaces the list of backend + // services when the list is empty. + Warning *TargetTcpProxiesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetTcpProxies") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetTcpProxies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetTcpProxiesScopedListWarning: Informational warning which +// replaces the list of backend services when the list is empty. +type TargetTcpProxiesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. 
+ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*TargetTcpProxiesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxiesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxiesScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type TargetTcpProxiesSetBackendServiceRequest struct { // Service: The URL of the new BackendService resource for the // targetTcpProxy. @@ -51200,6 +53824,10 @@ type TargetTcpProxy struct { // "PROXY_V1" ProxyHeader string `json:"proxyHeader,omitempty"` + // Region: [Output Only] URL of the region where the regional TCP proxy + // resides. This field is not applicable to global TCP proxy. + Region string `json:"region,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -51234,16 +53862,16 @@ func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. -type TargetTcpProxyList struct { +type TargetTcpProxyAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetTcpProxy resources. - Items []*TargetTcpProxy `json:"items,omitempty"` + // Items: A list of TargetTcpProxiesScopedList resources. + Items map[string]TargetTcpProxiesScopedList `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of resource. Always + // compute#targetTcpProxyAggregatedList for lists of Target TCP Proxies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -51257,8 +53885,11 @@ type TargetTcpProxyList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + // Warning: [Output Only] Informational warning message. 
- Warning *TargetTcpProxyListWarning `json:"warning,omitempty"` + Warning *TargetTcpProxyAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -51281,15 +53912,15 @@ type TargetTcpProxyList struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyList +func (s *TargetTcpProxyAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyAggregatedList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxyListWarning: [Output Only] Informational warning -// message. -type TargetTcpProxyListWarning struct { +// TargetTcpProxyAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetTcpProxyAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -51357,7 +53988,7 @@ type TargetTcpProxyListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" // } - Data []*TargetTcpProxyListWarningData `json:"data,omitempty"` + Data []*TargetTcpProxyAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -51380,13 +54011,13 @@ type TargetTcpProxyListWarning struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyListWarning +func (s *TargetTcpProxyAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyAggregatedListWarning raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetTcpProxyListWarningData struct { +type TargetTcpProxyAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -51417,111 +54048,22 @@ type TargetTcpProxyListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod TargetTcpProxyListWarningData - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetVpnGateway: Represents a Target VPN Gateway resource. The -// target VPN gateway resource represents a Classic Cloud VPN gateway. -// For more information, read the the Cloud VPN Overview. -type TargetVpnGateway struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule - // resources. ForwardingRules are created using - // compute.forwardingRules.insert and associated with a VPN gateway. - ForwardingRules []string `json:"forwardingRules,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of resource. 
Always compute#targetVpnGateway - // for target VPN gateways. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: URL of the network to which this VPN gateway is attached. - // Provided by the client when the VPN gateway is created. - Network string `json:"network,omitempty"` - - // Region: [Output Only] URL of the region where the target VPN gateway - // resides. You must specify this field as part of the HTTP request URL. - // It is not settable as a field in the request body. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Status: [Output Only] The status of the VPN gateway, which can be one - // of the following: CREATING, READY, FAILED, or DELETING. - // - // Possible values: - // "CREATING" - // "DELETING" - // "FAILED" - // "READY" - Status string `json:"status,omitempty"` - - // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. - // VpnTunnels are created using the compute.vpntunnels.insert method and - // associated with a VPN gateway. - Tunnels []string `json:"tunnels,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { - type NoMethod TargetVpnGateway +func (s *TargetTcpProxyAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyAggregatedListWarningData raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewayAggregatedList struct { +// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. +type TargetTcpProxyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetVpnGateway resources. - Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` + // Items: A list of TargetTcpProxy resources. 
+ Items []*TargetTcpProxy `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -51535,11 +54077,289 @@ type TargetVpnGatewayAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // Unreachables: [Output Only] Unreachable resources. - Unreachables []string `json:"unreachables,omitempty"` - // Warning: [Output Only] Informational warning message. - Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` + Warning *TargetTcpProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetTcpProxyListWarning: [Output Only] Informational warning +// message. +type TargetTcpProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. 
+ // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*TargetTcpProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod TargetTcpProxyListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGateway: Represents a Target VPN Gateway resource. The +// target VPN gateway resource represents a Classic Cloud VPN gateway. +// For more information, read the the Cloud VPN Overview. +type TargetVpnGateway struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule + // resources. ForwardingRules are created using + // compute.forwardingRules.insert and associated with a VPN gateway. + ForwardingRules []string `json:"forwardingRules,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. 
Specifically, the name must be 1-63 characters long and + // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: URL of the network to which this VPN gateway is attached. + // Provided by the client when the VPN gateway is created. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URL of the region where the target VPN gateway + // resides. You must specify this field as part of the HTTP request URL. + // It is not settable as a field in the request body. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] The status of the VPN gateway, which can be one + // of the following: CREATING, READY, FAILED, or DELETING. + // + // Possible values: + // "CREATING" + // "DELETING" + // "FAILED" + // "READY" + Status string `json:"status,omitempty"` + + // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. + // VpnTunnels are created using the compute.vpntunnels.insert method and + // associated with a VPN gateway. + Tunnels []string `json:"tunnels,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { + type NoMethod TargetVpnGateway + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetVpnGateway resources. + Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. 
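The nextPageToken contract above is normally consumed through the generated Pages helper rather than by hand; a minimal sketch, assuming the usual generator naming for the TargetVpnGateways field on the scoped list (svc and project are placeholders):

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listTargetVpnGateways lets Pages follow nextPageToken across the aggregated
// list, iterating the per-scope entries in each page's Items map.
func listTargetVpnGateways(ctx context.Context, svc *compute.Service, project string) error {
	return svc.TargetVpnGateways.AggregatedList(project).Pages(ctx, func(page *compute.TargetVpnGatewayAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, gw := range scoped.TargetVpnGateways {
				fmt.Printf("%s: %s (%s)\n", scope, gw.Name, gw.Status)
			}
		}
		return nil
	})
}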
+ NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Unreachables: [Output Only] Unreachable resources. + Unreachables []string `json:"unreachables,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -52239,8 +55059,8 @@ type UrlMap struct { // any weightedBackendServices, defaultService must not be set. // Conversely if defaultService is set, defaultRouteAction cannot // contain any weightedBackendServices. Only one of defaultRouteAction - // or defaultUrlRedirect must be set. UrlMaps for external HTTP(S) load - // balancers support only the urlRewrite action within + // or defaultUrlRedirect must be set. URL maps for Classic external + // HTTP(S) load balancers only support the urlRewrite action within // defaultRouteAction. defaultRouteAction has no effect when the URL map // is bound to a target gRPC proxy that has the validateForProxyless // field set to true. @@ -53926,7 +56746,7 @@ type VpnGateway struct { // You must always provide an up-to-date fingerprint hash in order to // update or change labels, otherwise the request will fail with error // 412 conditionNotMet. To see the latest fingerprint, make a get() - // request to retrieve an VpnGateway. + // request to retrieve a VpnGateway. LabelFingerprint string `json:"labelFingerprint,omitempty"` // Labels: Labels for this resource. These can only be added or modified @@ -53955,7 +56775,8 @@ type VpnGateway struct { SelfLink string `json:"selfLink,omitempty"` // StackType: The stack type for this VPN gateway to identify the IP - // protocols that are enabled. If not specified, IPV4_ONLY will be used. + // protocols that are enabled. Possible values are: IPV4_ONLY, + // IPV4_IPV6. If not specified, IPV4_ONLY will be used. // // Possible values: // "IPV4_IPV6" - Enable VPN gateway with both IPv4 and IPv6 protocols. @@ -54548,9 +57369,9 @@ type VpnGatewayVpnGatewayInterface struct { // InterconnectAttachment: URL of the VLAN attachment // (interconnectAttachment) resource for this VPN gateway interface. // When the value of this field is present, the VPN gateway is used for - // IPsec-encrypted Cloud Interconnect; all egress or ingress traffic for + // HA VPN over Cloud Interconnect; all egress or ingress traffic for // this VPN gateway interface goes through the specified VLAN attachment - // resource. Not currently available publicly. + // resource. InterconnectAttachment string `json:"interconnectAttachment,omitempty"` // IpAddress: [Output Only] IP address for this VPN interface associated @@ -54558,11 +57379,11 @@ type VpnGatewayVpnGatewayInterface struct { // external IP address or a regional internal IP address. The two IP // addresses for a VPN gateway must be all regional external or regional // internal IP addresses. There cannot be a mix of regional external IP - // addresses and regional internal IP addresses. For IPsec-encrypted - // Cloud Interconnect, the IP addresses for both interfaces could either - // be regional internal IP addresses or regional external IP addresses. - // For regular (non IPsec-encrypted Cloud Interconnect) HA VPN tunnels, - // the IP address must be a regional external IP address. + // addresses and regional internal IP addresses. 
For HA VPN over Cloud + // Interconnect, the IP addresses for both interfaces could either be + // regional internal IP addresses or regional external IP addresses. For + // regular (non HA VPN over Cloud Interconnect) HA VPN tunnels, the IP + // address must be a regional external IP address. IpAddress string `json:"ipAddress,omitempty"` // ForceSendFields is a list of field names (e.g. "Id") to @@ -54837,7 +57658,9 @@ type VpnTunnel struct { // PeerExternalGatewayInterface: The interface ID of the external VPN // gateway to which this VPN tunnel is connected. Provided by the client - // when the VPN tunnel is created. + // when the VPN tunnel is created. Possible values are: `0`, `1`, `2`, + // `3`. The number of IDs in use depends on the external VPN gateway + // redundancy type. PeerExternalGatewayInterface int64 `json:"peerExternalGatewayInterface,omitempty"` // PeerGcpGateway: URL of the peer side HA GCP VPN gateway to which this @@ -54934,7 +57757,7 @@ type VpnTunnel struct { VpnGateway string `json:"vpnGateway,omitempty"` // VpnGatewayInterface: The interface ID of the VPN gateway with which - // this VPN tunnel is associated. + // this VPN tunnel is associated. Possible values are: `0`, `1`. VpnGatewayInterface int64 `json:"vpnGatewayInterface,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -55560,6 +58383,11 @@ type WafExpressionSetExpression struct { // positive. required Id string `json:"id,omitempty"` + // Sensitivity: The sensitivity value associated with the WAF rule ID. + // This corresponds to the ModSecurity paranoia level, ranging from 1 to + // 4. 0 is reserved for opt-in only rules. + Sensitivity int64 `json:"sensitivity,omitempty"` + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any @@ -56391,17 +59219,17 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AcceleratorTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -56601,17 +59429,17 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AcceleratorType{ ServerResponse: googleapi.ServerResponse{ @@ -56852,17 +59680,17 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AcceleratorTypeList{ ServerResponse: googleapi.ServerResponse{ @@ -57152,17 +59980,17 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AddressAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -57364,17 +60192,17 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -57540,17 +60368,17 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Address{ ServerResponse: googleapi.ServerResponse{ @@ -57718,17 +60546,17 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if 
err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -57968,17 +60796,17 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AddressList{ ServerResponse: googleapi.ServerResponse{ @@ -58078,6 +60906,195 @@ func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) erro } } +// method id "compute.addresses.setLabels": + +type AddressesSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an Address. To learn more about labels, +// read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. +func (r *AddressesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *AddressesSetLabelsCall { + c := &AddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *AddressesSetLabelsCall) RequestId(requestId string) *AddressesSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesSetLabelsCall) Fields(s ...googleapi.Field) *AddressesSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesSetLabelsCall) Context(ctx context.Context) *AddressesSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
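A hedged usage sketch for the compute.addresses.setLabels call defined here; project, region, address and the label values are placeholders, and reading LabelFingerprint from a prior Get assumes the Address type exposes that field at this API revision:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// setAddressLabels fetches the current labelFingerprint and then applies new
// labels; a stale fingerprint makes the request fail with 412 conditionNotMet.
func setAddressLabels(ctx context.Context, svc *compute.Service, project, region, address string) error {
	addr, err := svc.Addresses.Get(project, region, address).Context(ctx).Do()
	if err != nil {
		return err
	}
	req := &compute.RegionSetLabelsRequest{
		LabelFingerprint: addr.LabelFingerprint, // assumed field; optimistic-locking hash
		Labels:           map[string]string{"env": "dev"},
	}
	_, err = svc.Addresses.SetLabels(project, region, address, req).Context(ctx).Do()
	return err
}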
+func (c *AddressesSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.addresses.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.autoscalers.aggregatedList": type AutoscalersAggregatedListCall struct { @@ -58268,17 +61285,17 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AutoscalerAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -58480,17 +61497,17 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -58566,8 +61583,7 @@ type AutoscalersGetCall struct { header_ http.Header } -// Get: Returns the specified autoscaler resource. Gets a list of -// available autoscalers by making a list() request. +// Get: Returns the specified autoscaler resource. // // - autoscaler: Name of the autoscaler to return. // - project: Project ID for this request. @@ -58657,17 +61673,17 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Autoscaler{ ServerResponse: googleapi.ServerResponse{ @@ -58681,7 +61697,7 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } return ret, nil // { - // "description": "Returns the specified autoscaler resource. 
Gets a list of available autoscalers by making a list() request.", + // "description": "Returns the specified autoscaler resource.", // "flatPath": "projects/{project}/zones/{zone}/autoscalers/{autoscaler}", // "httpMethod": "GET", // "id": "compute.autoscalers.get", @@ -58835,17 +61851,17 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -59085,17 +62101,17 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AutoscalerList{ ServerResponse: googleapi.ServerResponse{ @@ -59312,17 +62328,17 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -59502,17 +62518,17 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -59591,9 +62607,9 @@ type BackendBucketsAddSignedUrlKeyCall struct { // AddSignedUrlKey: Adds a key for validating requests with signed URLs // for this backend bucket. // -// - backendBucket: Name of the BackendBucket resource to which the -// Signed URL Key should be added. The name should conform to RFC1035. -// - project: Project ID for this request. +// - backendBucket: Name of the BackendBucket resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - project: Project ID for this request. 
func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -59686,17 +62702,17 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -59854,17 +62870,17 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -59933,10 +62949,10 @@ type BackendBucketsDeleteSignedUrlKeyCall struct { // DeleteSignedUrlKey: Deletes a key for validating requests with signed // URLs for this backend bucket. // -// - backendBucket: Name of the BackendBucket resource to which the -// Signed URL Key should be added. The name should conform to RFC1035. -// - keyName: The name of the Signed URL Key to delete. -// - project: Project ID for this request. +// - backendBucket: Name of the BackendBucket resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - keyName: The name of the Signed URL Key to delete. +// - project: Project ID for this request. func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -60024,17 +63040,17 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -60107,8 +63123,7 @@ type BackendBucketsGetCall struct { header_ http.Header } -// Get: Returns the specified BackendBucket resource. Gets a list of -// available backend buckets by making a list() request. +// Get: Returns the specified BackendBucket resource. // // - backendBucket: Name of the BackendBucket resource to return. // - project: Project ID for this request. 
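The recurring change in these hunks is that every non-2xx response and CheckResponse failure returned from a generated Do method is now routed through gensupport.WrapError. Below is a minimal sketch of how downstream code can keep inspecting those errors after this bump; it assumes the usual compute.NewService constructor with Application Default Credentials and uses placeholder project/bucket names, none of which appear in this diff.

package main

import (
	"context"
	"errors"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()

	// NewService is the standard constructor for this generated client; it is
	// not part of this diff and is assumed here. With no options it uses
	// Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("creating compute service: %v", err)
	}

	// "my-project" and "my-bucket" are placeholders. BackendBuckets.Get is one
	// of the calls whose error path is rewritten above to go through
	// gensupport.WrapError.
	bucket, err := svc.BackendBuckets.Get("my-project", "my-bucket").Context(ctx).Do()
	if err != nil {
		// errors.As is used instead of a direct type assertion because the
		// wrapped error is not necessarily a bare *googleapi.Error anymore.
		var apiErr *googleapi.Error
		if errors.As(err, &apiErr) {
			log.Fatalf("compute API error %d: %s", apiErr.Code, apiErr.Message)
		}
		log.Fatalf("transport or decoding error: %v", err)
	}
	log.Printf("backend bucket self link: %s", bucket.SelfLink)
}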
@@ -60195,17 +63210,17 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendBucket{ ServerResponse: googleapi.ServerResponse{ @@ -60219,7 +63234,7 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } return ret, nil // { - // "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request.", + // "description": "Returns the specified BackendBucket resource.", // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}", // "httpMethod": "GET", // "id": "compute.backendBuckets.get", @@ -60361,17 +63376,17 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -60599,17 +63614,17 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendBucketList{ ServerResponse: googleapi.ServerResponse{ @@ -60811,17 +63826,17 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -60894,9 +63909,9 @@ type BackendBucketsSetEdgeSecurityPolicyCall struct { // SetEdgeSecurityPolicy: Sets the edge security policy for the // specified backend bucket. // -// - backendBucket: Name of the BackendService resource to which the -// security policy should be set. The name should conform to RFC1035. -// - project: Project ID for this request. +// - backendBucket: Name of the BackendService resource to which the +// security policy should be set. The name should conform to RFC1035. +// - project: Project ID for this request. 
func (r *BackendBucketsService) SetEdgeSecurityPolicy(project string, backendBucket string, securitypolicyreference *SecurityPolicyReference) *BackendBucketsSetEdgeSecurityPolicyCall { c := &BackendBucketsSetEdgeSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -60989,17 +64004,17 @@ func (c *BackendBucketsSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -61165,17 +64180,17 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -61248,9 +64263,9 @@ type BackendServicesAddSignedUrlKeyCall struct { // AddSignedUrlKey: Adds a key for validating requests with signed URLs // for this backend service. // -// - backendService: Name of the BackendService resource to which the -// Signed URL Key should be added. The name should conform to RFC1035. -// - project: Project ID for this request. +// - backendService: Name of the BackendService resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - project: Project ID for this request. 
func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -61343,17 +64358,17 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -61601,17 +64616,17 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendServiceAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -61809,17 +64824,17 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -61888,10 +64903,10 @@ type BackendServicesDeleteSignedUrlKeyCall struct { // DeleteSignedUrlKey: Deletes a key for validating requests with signed // URLs for this backend service. // -// - backendService: Name of the BackendService resource to which the -// Signed URL Key should be added. The name should conform to RFC1035. -// - keyName: The name of the Signed URL Key to delete. -// - project: Project ID for this request. +// - backendService: Name of the BackendService resource to which the +// Signed URL Key should be added. The name should conform to RFC1035. +// - keyName: The name of the Signed URL Key to delete. +// - project: Project ID for this request. func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -61979,17 +64994,17 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -62062,8 +65077,7 @@ type BackendServicesGetCall struct { header_ http.Header } -// Get: Returns the specified BackendService resource. Gets a list of -// available backend services. 
+// Get: Returns the specified BackendService resource. // // - backendService: Name of the BackendService resource to return. // - project: Project ID for this request. @@ -62150,17 +65164,17 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendService{ ServerResponse: googleapi.ServerResponse{ @@ -62174,7 +65188,7 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi } return ret, nil // { - // "description": "Returns the specified BackendService resource. Gets a list of available backend services.", + // "description": "Returns the specified BackendService resource.", // "flatPath": "projects/{project}/global/backendServices/{backendService}", // "httpMethod": "GET", // "id": "compute.backendServices.get", @@ -62227,9 +65241,9 @@ type BackendServicesGetHealthCall struct { // BackendService. Example request body: { "group": // "/zones/us-east1-b/instanceGroups/lb-backend-example" } // -// - backendService: Name of the BackendService resource to which the -// queried instance belongs. -// - project: . +// - backendService: Name of the BackendService resource to which the +// queried instance belongs. +// - project: . func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -62306,17 +65320,17 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendServiceGroupHealth{ ServerResponse: googleapi.ServerResponse{ @@ -62369,6 +65383,180 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen } +// method id "compute.backendServices.getIamPolicy": + +type BackendServicesGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *BackendServicesService) GetIamPolicy(project string, resource string) *BackendServicesGetIamPolicyCall { + c := &BackendServicesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. 
+func (c *BackendServicesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BackendServicesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesGetIamPolicyCall) Fields(s ...googleapi.Field) *BackendServicesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendServicesGetIamPolicyCall) IfNoneMatch(entityTag string) *BackendServicesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesGetIamPolicyCall) Context(ctx context.Context) *BackendServicesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BackendServicesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/global/backendServices/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.backendServices.getIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.backendServices.insert": type BackendServicesInsertCall struct { @@ -62475,17 +65663,17 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -62713,17 +65901,17 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ @@ -62926,17 +66114,17 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer 
googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -63009,10 +66197,10 @@ type BackendServicesSetEdgeSecurityPolicyCall struct { // SetEdgeSecurityPolicy: Sets the edge security policy for the // specified backend service. // -// - backendService: Name of the BackendService resource to which the -// edge security policy should be set. The name should conform to -// RFC1035. -// - project: Project ID for this request. +// - backendService: Name of the BackendService resource to which the +// edge security policy should be set. The name should conform to +// RFC1035. +// - project: Project ID for this request. func (r *BackendServicesService) SetEdgeSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetEdgeSecurityPolicyCall { c := &BackendServicesSetEdgeSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -63105,17 +66293,17 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -63172,6 +66360,162 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOpti } +// method id "compute.backendServices.setIamPolicy": + +type BackendServicesSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *BackendServicesService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *BackendServicesSetIamPolicyCall { + c := &BackendServicesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesSetIamPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesSetIamPolicyCall) Context(ctx context.Context) *BackendServicesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BackendServicesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BackendServicesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "flatPath": "projects/{project}/global/backendServices/{resource}/setIamPolicy", + // "httpMethod": "POST", + // "id": "compute.backendServices.setIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{resource}/setIamPolicy", + // "request": { + // "$ref": "GlobalSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.backendServices.setSecurityPolicy": type BackendServicesSetSecurityPolicyCall struct { @@ -63188,9 +66532,9 @@ type BackendServicesSetSecurityPolicyCall struct { // the specified backend service. For more information, see Google Cloud // Armor Overview // -// - backendService: Name of the BackendService resource to which the -// security policy should be set. The name should conform to RFC1035. -// - project: Project ID for this request. +// - backendService: Name of the BackendService resource to which the +// security policy should be set. The name should conform to RFC1035. +// - project: Project ID for this request. 
func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -63283,17 +66627,17 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -63460,17 +66804,17 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -63718,17 +67062,17 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DiskTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -63838,8 +67182,7 @@ type DiskTypesGetCall struct { header_ http.Header } -// Get: Returns the specified disk type. Gets a list of available disk -// types by making a list() request. +// Get: Returns the specified disk type. // // - diskType: Name of the disk type to return. // - project: Project ID for this request. @@ -63929,17 +67272,17 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DiskType{ ServerResponse: googleapi.ServerResponse{ @@ -63953,7 +67296,7 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { } return ret, nil // { - // "description": "Returns the specified disk type. 
Gets a list of available disk types by making a list() request.", + // "description": "Returns the specified disk type.", // "flatPath": "projects/{project}/zones/{zone}/diskTypes/{diskType}", // "httpMethod": "GET", // "id": "compute.diskTypes.get", @@ -64180,17 +67523,17 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DiskTypeList{ ServerResponse: googleapi.ServerResponse{ @@ -64404,17 +67747,17 @@ func (c *DisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -64670,17 +68013,17 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DiskAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -64900,17 +68243,17 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -65089,17 +68432,17 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -65174,8 +68517,7 @@ type DisksGetCall struct { header_ http.Header } -// Get: Returns a specified persistent disk. Gets a list of available -// persistent disks by making a list() request. +// Get: Returns the specified persistent disk. // // - disk: Name of the persistent disk to return. // - project: Project ID for this request. 
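The hunks above also introduce compute.backendServices.getIamPolicy and compute.backendServices.setIamPolicy. The sketch below shows one plausible read-modify-write round trip against those two methods; the compute.NewService constructor, the placeholder names, and the assumption that GlobalSetPolicyRequest carries the policy in a Policy field are not confirmed by this diff.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor, as in the earlier sketch
	if err != nil {
		log.Fatal(err)
	}

	project, backendService := "my-project", "my-backend-service" // placeholders

	// Read the current policy; requesting version 3 opts in to conditional
	// role bindings if any are present (optionsRequestedPolicyVersion above).
	policy, err := svc.BackendServices.GetIamPolicy(project, backendService).
		OptionsRequestedPolicyVersion(3).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("get IAM policy: %v", err)
	}

	// Write the (possibly modified) policy back. Reusing the returned policy,
	// including its Etag, lets the server detect concurrent modification.
	// The Policy field on GlobalSetPolicyRequest is assumed, not shown here.
	req := &compute.GlobalSetPolicyRequest{Policy: policy}
	if _, err := svc.BackendServices.SetIamPolicy(project, backendService, req).Context(ctx).Do(); err != nil {
		log.Fatalf("set IAM policy: %v", err)
	}
}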
@@ -65265,17 +68607,17 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Disk{ ServerResponse: googleapi.ServerResponse{ @@ -65289,7 +68631,7 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.", + // "description": "Returns the specified persistent disk.", // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}", // "httpMethod": "GET", // "id": "compute.disks.get", @@ -65445,17 +68787,17 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -65640,17 +68982,17 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -65895,17 +69237,17 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DiskList{ ServerResponse: googleapi.ServerResponse{ @@ -66117,17 +69459,17 @@ func (c *DisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -66306,17 +69648,17 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ 
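The hunks that follow add a new compute.disks.update method (DisksUpdateCall), which patches a disk under an update mask; per its description, only user_license is mutable. A hedged sketch of driving that call is below; the field-mask string, the commented-out Disk field, the placeholder identifiers, and the service constructor are illustrative assumptions rather than details confirmed by this diff.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumed constructor, as in the earlier sketches
	if err != nil {
		log.Fatal(err)
	}

	// Placeholders for the path parameters taken by the generated method.
	project, zone, disk := "my-project", "us-central1-a", "my-disk"

	// Only fields named in the update mask are applied. The exact Go field
	// name and mask value for user_license are assumptions for illustration.
	patch := &compute.Disk{
		// UserLicenses: []string{"projects/my-project/global/licenses/my-license"},
	}

	op, err := svc.Disks.Update(project, zone, disk, patch).
		UpdateMask("userLicenses").
		RequestId("3c6b2b9e-5d0f-4d8e-9f8a-0b1c2d3e4f5a"). // optional idempotency token (placeholder UUID)
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("disks.update: %v", err)
	}
	log.Printf("started operation %s (status %s)", op.Name, op.Status)
}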
@@ -66479,17 +69821,17 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -66663,17 +70005,17 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -66836,17 +70178,17 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -66908,6 +70250,221 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } +// method id "compute.disks.update": + +type DisksUpdateCall struct { + s *Service + project string + zone string + disk string + disk2 *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the specified disk with the data included in the +// request. The update is performed only on selected fields included as +// part of update-mask. Only the following fields can be modified: +// user_license. +// +// - disk: The disk name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *DisksService) Update(project string, zone string, disk string, disk2 *Disk) *DisksUpdateCall { + c := &DisksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.disk2 = disk2 + return c +} + +// Paths sets the optional parameter "paths": +func (c *DisksUpdateCall) Paths(paths ...string) *DisksUpdateCall { + c.urlParams_.SetMulti("paths", append([]string{}, paths...)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). 
+func (c *DisksUpdateCall) RequestId(requestId string) *DisksUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// UpdateMask sets the optional parameter "updateMask": update_mask +// indicates fields to be updated as part of this request. +func (c *DisksUpdateCall) UpdateMask(updateMask string) *DisksUpdateCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksUpdateCall) Fields(s ...googleapi.Field) *DisksUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksUpdateCall) Context(ctx context.Context) *DisksUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified disk with the data included in the request. 
The update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: user_license.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}", + // "httpMethod": "PATCH", + // "id": "compute.disks.update", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "paths": { + // "location": "query", + // "repeated": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "updateMask": { + // "description": "update_mask indicates fields to be updated as part of this request.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/{disk}", + // "request": { + // "$ref": "Disk" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.externalVpnGateways.delete": type ExternalVpnGatewaysDeleteCall struct { @@ -67009,17 +70566,17 @@ func (c *ExternalVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -67174,17 +70731,17 @@ func (c *ExternalVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*External if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := 
&ExternalVpnGateway{ ServerResponse: googleapi.ServerResponse{ @@ -67340,17 +70897,17 @@ func (c *ExternalVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -67578,17 +71135,17 @@ func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*Externa if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ExternalVpnGatewayList{ ServerResponse: googleapi.ServerResponse{ @@ -67773,17 +71330,17 @@ func (c *ExternalVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -67929,17 +71486,17 @@ func (c *ExternalVpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -68108,17 +71665,17 @@ func (c *FirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -68277,17 +71834,17 @@ func (c *FirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -68441,17 +71998,17 @@ func (c *FirewallPoliciesCloneRulesCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } 
defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -68600,17 +72157,17 @@ func (c *FirewallPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -68752,17 +72309,17 @@ func (c *FirewallPoliciesGetCall) Do(opts ...googleapi.CallOption) (*FirewallPol if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -68818,8 +72375,8 @@ type FirewallPoliciesGetAssociationCall struct { // GetAssociation: Gets an association with the specified name. // -// - firewallPolicy: Name of the firewall policy to which the queried -// rule belongs. +// - firewallPolicy: Name of the firewall policy to which the queried +// rule belongs. func (r *FirewallPoliciesService) GetAssociation(firewallPolicy string) *FirewallPoliciesGetAssociationCall { c := &FirewallPoliciesGetAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -68908,17 +72465,17 @@ func (c *FirewallPoliciesGetAssociationCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyAssociation{ ServerResponse: googleapi.ServerResponse{ @@ -69069,17 +72626,17 @@ func (c *FirewallPoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Po if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -69141,8 +72698,8 @@ type FirewallPoliciesGetRuleCall struct { // GetRule: Gets a rule of the specified priority. // -// - firewallPolicy: Name of the firewall policy to which the queried -// rule belongs. +// - firewallPolicy: Name of the firewall policy to which the queried +// rule belongs. 
func (r *FirewallPoliciesService) GetRule(firewallPolicy string) *FirewallPoliciesGetRuleCall { c := &FirewallPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.firewallPolicy = firewallPolicy @@ -69231,17 +72788,17 @@ func (c *FirewallPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*Firewal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyRule{ ServerResponse: googleapi.ServerResponse{ @@ -69397,17 +72954,17 @@ func (c *FirewallPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -69543,7 +73100,9 @@ func (c *FirewallPoliciesListCall) PageToken(pageToken string) *FirewallPolicies } // ParentId sets the optional parameter "parentId": Parent ID for this -// request. +// request. The ID can be either be "folders/[FOLDER_ID]" if the parent +// is a folder or "organizations/[ORGANIZATION_ID]" if the parent is an +// organization. func (c *FirewallPoliciesListCall) ParentId(parentId string) *FirewallPoliciesListCall { c.urlParams_.Set("parentId", parentId) return c @@ -69630,17 +73189,17 @@ func (c *FirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*FirewallPo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyList{ ServerResponse: googleapi.ServerResponse{ @@ -69683,7 +73242,7 @@ func (c *FirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*FirewallPo // "type": "string" // }, // "parentId": { - // "description": "Parent ID for this request.", + // "description": "Parent ID for this request. The ID can be either be \"folders/[FOLDER_ID]\" if the parent is a folder or \"organizations/[ORGANIZATION_ID]\" if the parent is an organization.", // "location": "query", // "type": "string" // }, @@ -69826,17 +73385,17 @@ func (c *FirewallPoliciesListAssociationsCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPoliciesListAssociationsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -69894,7 +73453,9 @@ func (r *FirewallPoliciesService) Move(firewallPolicy string) *FirewallPoliciesM } // ParentId sets the optional parameter "parentId": The new parent of -// the firewall policy. +// the firewall policy. 
The ID can be either be "folders/[FOLDER_ID]" if +// the parent is a folder or "organizations/[ORGANIZATION_ID]" if the +// parent is an organization. func (c *FirewallPoliciesMoveCall) ParentId(parentId string) *FirewallPoliciesMoveCall { c.urlParams_.Set("parentId", parentId) return c @@ -69978,17 +73539,17 @@ func (c *FirewallPoliciesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -70018,7 +73579,7 @@ func (c *FirewallPoliciesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "parentId": { - // "description": "The new parent of the firewall policy.", + // "description": "The new parent of the firewall policy. The ID can be either be \"folders/[FOLDER_ID]\" if the parent is a folder or \"organizations/[ORGANIZATION_ID]\" if the parent is an organization.", // "location": "query", // "type": "string" // }, @@ -70145,17 +73706,17 @@ func (c *FirewallPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -70316,17 +73877,17 @@ func (c *FirewallPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -70487,17 +74048,17 @@ func (c *FirewallPoliciesRemoveAssociationCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -70653,17 +74214,17 @@ func (c *FirewallPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -70805,17 +74366,17 @@ func (c *FirewallPoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Po if res.Body != nil { res.Body.Close() } - return nil, 
&googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -70949,17 +74510,17 @@ func (c *FirewallPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -71106,17 +74667,17 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -71270,17 +74831,17 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Firewall{ ServerResponse: googleapi.ServerResponse{ @@ -71436,17 +74997,17 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -71674,17 +75235,17 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallList{ ServerResponse: googleapi.ServerResponse{ @@ -71886,17 +75447,17 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -72065,17 +75626,17 @@ func (c 
*FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -72323,17 +75884,17 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ForwardingRuleAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -72535,17 +76096,17 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -72711,17 +76272,17 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ @@ -72889,17 +76450,17 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -73139,17 +76700,17 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ @@ -73364,17 +76925,17 @@ func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return 
nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -73553,17 +77114,17 @@ func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -73645,10 +77206,10 @@ type ForwardingRulesSetTargetCall struct { // SetTarget: Changes target URL for forwarding rule. The new target // should be of the same type as the old target. // -// - forwardingRule: Name of the ForwardingRule resource in which target -// is to be set. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - forwardingRule: Name of the ForwardingRule resource in which target +// is to be set. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -73743,17 +77304,17 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -73920,17 +77481,17 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -73997,8 +77558,7 @@ type GlobalAddressesGetCall struct { header_ http.Header } -// Get: Returns the specified address resource. Gets a list of available -// addresses by making a list() request. +// Get: Returns the specified address resource. // // - address: Name of the address resource to return. // - project: Project ID for this request. 
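The one substantive change repeated through the hunks above is that every generated Do method now returns gensupport.WrapError(...) instead of a bare *googleapi.Error. The wrapper is intended to expose an *apierror.APIError for richer error details while keeping the original *googleapi.Error reachable through the standard unwrap chain, so existing status-code checks via errors.As should continue to work. A minimal caller-side sketch, assuming Application Default Credentials and using placeholder project and firewall names:

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"

	"google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// isNotFound reports whether err carries an HTTP 404 from the Compute API.
// errors.As walks the unwrap chain, so it matches both a bare *googleapi.Error
// and one wrapped by gensupport.WrapError.
func isNotFound(err error) bool {
	var gerr *googleapi.Error
	return errors.As(err, &gerr) && gerr.Code == http.StatusNotFound
}

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumes Application Default Credentials
	if err != nil {
		panic(err)
	}
	// "my-project" and "missing-firewall" are placeholders.
	_, err = svc.Firewalls.Get("my-project", "missing-firewall").Context(ctx).Do()
	if isNotFound(err) {
		fmt.Println("firewall does not exist")
	}
}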
@@ -74085,17 +77645,17 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Address{ ServerResponse: googleapi.ServerResponse{ @@ -74109,7 +77669,7 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err } return ret, nil // { - // "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request.", + // "description": "Returns the specified address resource.", // "flatPath": "projects/{project}/global/addresses/{address}", // "httpMethod": "GET", // "id": "compute.globalAddresses.get", @@ -74251,17 +77811,17 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -74488,17 +78048,17 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &AddressList{ ServerResponse: googleapi.ServerResponse{ @@ -74590,6 +78150,162 @@ func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList } } +// method id "compute.globalAddresses.setLabels": + +type GlobalAddressesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a GlobalAddress. To learn more about +// labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *GlobalAddressesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalAddressesSetLabelsCall { + c := &GlobalAddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalAddressesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalAddressesSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *GlobalAddressesSetLabelsCall) Context(ctx context.Context) *GlobalAddressesSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalAddressesSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.globalAddresses.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a GlobalAddress. 
To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/global/addresses/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.globalAddresses.setLabels", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/addresses/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.globalForwardingRules.delete": type GlobalForwardingRulesDeleteCall struct { @@ -74691,17 +78407,17 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -74856,17 +78572,17 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ @@ -75022,17 +78738,17 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -75260,17 +78976,17 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ @@ -75473,17 +79189,17 @@ func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, 
&googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -75634,17 +79350,17 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -75712,9 +79428,9 @@ type GlobalForwardingRulesSetTargetCall struct { // SetTarget: Changes target URL for the GlobalForwardingRule resource. // The new target should be of the same type as the old target. // -// - forwardingRule: Name of the ForwardingRule resource in which target -// is to be set. -// - project: Project ID for this request. +// - forwardingRule: Name of the ForwardingRule resource in which target +// is to be set. +// - project: Project ID for this request. func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75807,17 +79523,17 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -75890,10 +79606,10 @@ type GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall struct { // AttachNetworkEndpoints: Attach a network endpoint to the specified // network endpoint group. // -// - networkEndpointGroup: The name of the network endpoint group where -// you are attaching network endpoints to. It should comply with -// RFC1035. -// - project: Project ID for this request. +// - networkEndpointGroup: The name of the network endpoint group where +// you are attaching network endpoints to. It should comply with +// RFC1035. +// - project: Project ID for this request. 
func (r *GlobalNetworkEndpointGroupsService) AttachNetworkEndpoints(project string, networkEndpointGroup string, globalnetworkendpointgroupsattachendpointsrequest *GlobalNetworkEndpointGroupsAttachEndpointsRequest) *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall { c := &GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -75986,17 +79702,17 @@ func (c *GlobalNetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googl if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -76067,9 +79783,9 @@ type GlobalNetworkEndpointGroupsDeleteCall struct { // Delete: Deletes the specified network endpoint group.Note that the // NEG cannot be deleted if there are backend services referencing it. // -// - networkEndpointGroup: The name of the network endpoint group to -// delete. It should comply with RFC1035. -// - project: Project ID for this request. +// - networkEndpointGroup: The name of the network endpoint group to +// delete. It should comply with RFC1035. +// - project: Project ID for this request. func (r *GlobalNetworkEndpointGroupsService) Delete(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsDeleteCall { c := &GlobalNetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76156,17 +79872,17 @@ func (c *GlobalNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -76235,9 +79951,9 @@ type GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall struct { // DetachNetworkEndpoints: Detach the network endpoint from the // specified network endpoint group. // -// - networkEndpointGroup: The name of the network endpoint group where -// you are removing network endpoints. It should comply with RFC1035. -// - project: Project ID for this request. +// - networkEndpointGroup: The name of the network endpoint group where +// you are removing network endpoints. It should comply with RFC1035. +// - project: Project ID for this request. 
func (r *GlobalNetworkEndpointGroupsService) DetachNetworkEndpoints(project string, networkEndpointGroup string, globalnetworkendpointgroupsdetachendpointsrequest *GlobalNetworkEndpointGroupsDetachEndpointsRequest) *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall { c := &GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76330,17 +80046,17 @@ func (c *GlobalNetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googl if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -76409,12 +80125,11 @@ type GlobalNetworkEndpointGroupsGetCall struct { header_ http.Header } -// Get: Returns the specified network endpoint group. Gets a list of -// available network endpoint groups by making a list() request. +// Get: Returns the specified network endpoint group. // -// - networkEndpointGroup: The name of the network endpoint group. It -// should comply with RFC1035. -// - project: Project ID for this request. +// - networkEndpointGroup: The name of the network endpoint group. It +// should comply with RFC1035. +// - project: Project ID for this request. func (r *GlobalNetworkEndpointGroupsService) Get(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsGetCall { c := &GlobalNetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -76498,17 +80213,17 @@ func (c *GlobalNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ @@ -76522,7 +80237,7 @@ func (c *GlobalNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns the specified network endpoint group. 
Gets a list of available network endpoint groups by making a list() request.", + // "description": "Returns the specified network endpoint group.", // "flatPath": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}", // "httpMethod": "GET", // "id": "compute.globalNetworkEndpointGroups.get", @@ -76663,17 +80378,17 @@ func (c *GlobalNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -76901,17 +80616,17 @@ func (c *GlobalNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ @@ -77017,10 +80732,10 @@ type GlobalNetworkEndpointGroupsListNetworkEndpointsCall struct { // ListNetworkEndpoints: Lists the network endpoints in the specified // network endpoint group. // -// - networkEndpointGroup: The name of the network endpoint group from -// which you want to generate a list of included network endpoints. It -// should comply with RFC1035. -// - project: Project ID for this request. +// - networkEndpointGroup: The name of the network endpoint group from +// which you want to generate a list of included network endpoints. It +// should comply with RFC1035. +// - project: Project ID for this request. 
func (r *GlobalNetworkEndpointGroupsService) ListNetworkEndpoints(project string, networkEndpointGroup string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { c := &GlobalNetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -77175,17 +80890,17 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googlea if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroupsListNetworkEndpoints{ ServerResponse: googleapi.ServerResponse{ @@ -77474,17 +81189,17 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OperationAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -77661,7 +81376,7 @@ func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -77797,17 +81512,17 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -78036,17 +81751,17 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ @@ -78234,17 +81949,17 @@ func (c *GlobalOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -78378,7 +82093,7 @@ func (c *GlobalOrganizationOperationsDeleteCall) Do(opts ...googleapi.CallOption } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -78515,17 +82230,17 @@ func (c *GlobalOrganizationOperationsGetCall) Do(opts 
...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -78751,17 +82466,17 @@ func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ @@ -78861,9 +82576,9 @@ type GlobalPublicDelegatedPrefixesDeleteCall struct { // Delete: Deletes the specified global PublicDelegatedPrefix. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to delete. +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to delete. func (r *GlobalPublicDelegatedPrefixesService) Delete(project string, publicDelegatedPrefix string) *GlobalPublicDelegatedPrefixesDeleteCall { c := &GlobalPublicDelegatedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -78950,17 +82665,17 @@ func (c *GlobalPublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -79029,9 +82744,9 @@ type GlobalPublicDelegatedPrefixesGetCall struct { // Get: Returns the specified global PublicDelegatedPrefix resource. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to return. +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to return. 
func (r *GlobalPublicDelegatedPrefixesService) Get(project string, publicDelegatedPrefix string) *GlobalPublicDelegatedPrefixesGetCall { c := &GlobalPublicDelegatedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -79115,17 +82830,17 @@ func (c *GlobalPublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PublicDelegatedPrefix{ ServerResponse: googleapi.ServerResponse{ @@ -79281,17 +82996,17 @@ func (c *GlobalPublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -79518,17 +83233,17 @@ func (c *GlobalPublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PublicDelegatedPrefixList{ ServerResponse: googleapi.ServerResponse{ @@ -79636,9 +83351,9 @@ type GlobalPublicDelegatedPrefixesPatchCall struct { // with the data included in the request. This method supports PATCH // semantics and uses JSON merge patch format and processing rules. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to patch. +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to patch. 
func (r *GlobalPublicDelegatedPrefixesService) Patch(project string, publicDelegatedPrefix string, publicdelegatedprefix *PublicDelegatedPrefix) *GlobalPublicDelegatedPrefixesPatchCall { c := &GlobalPublicDelegatedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -79731,17 +83446,17 @@ func (c *GlobalPublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -79990,17 +83705,17 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HealthChecksAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -80198,17 +83913,17 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -80275,8 +83990,7 @@ type HealthChecksGetCall struct { header_ http.Header } -// Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. +// Get: Returns the specified HealthCheck resource. // // - healthCheck: Name of the HealthCheck resource to return. // - project: Project ID for this request. @@ -80363,17 +84077,17 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ @@ -80387,7 +84101,7 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. 
Gets a list of available health checks by making a list() request.", + // "description": "Returns the specified HealthCheck resource.", // "flatPath": "projects/{project}/global/healthChecks/{healthCheck}", // "httpMethod": "GET", // "id": "compute.healthChecks.get", @@ -80529,17 +84243,17 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -80767,17 +84481,17 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ @@ -80979,17 +84693,17 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -81156,17 +84870,17 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -81325,17 +85039,17 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -81402,8 +85116,7 @@ type HttpHealthChecksGetCall struct { header_ http.Header } -// Get: Returns the specified HttpHealthCheck resource. Gets a list of -// available HTTP health checks by making a list() request. +// Get: Returns the specified HttpHealthCheck resource. // // - httpHealthCheck: Name of the HttpHealthCheck resource to return. // - project: Project ID for this request. 
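Several Get descriptions above also drop the confusing trailing sentence "Gets a list of available ... by making a list() request": Get returns exactly one named resource, and enumeration goes through the corresponding List call. A short sketch of the two call shapes with the generated client, using placeholder names:

package main

import (
	"context"
	"fmt"

	"google.golang.org/api/compute/v1"
)

// listHealthCheckNames prints every health check in a project; Pages follows
// nextPageToken automatically.
func listHealthCheckNames(ctx context.Context, svc *compute.Service, project string) error {
	return svc.HealthChecks.List(project).Pages(ctx, func(page *compute.HealthCheckList) error {
		for _, hc := range page.Items {
			fmt.Println(hc.Name)
		}
		return nil
	})
}

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		panic(err)
	}
	// Get fetches a single, named resource ("my-project" and "my-health-check"
	// are placeholders).
	hc, err := svc.HealthChecks.Get("my-project", "my-health-check").Context(ctx).Do()
	if err != nil {
		panic(err)
	}
	fmt.Println(hc.Name, hc.Type)
	if err := listHealthCheckNames(ctx, svc, "my-project"); err != nil {
		panic(err)
	}
}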
@@ -81490,17 +85203,17 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HttpHealthCheck{ ServerResponse: googleapi.ServerResponse{ @@ -81514,7 +85227,7 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } return ret, nil // { - // "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.", + // "description": "Returns the specified HttpHealthCheck resource.", // "flatPath": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", // "httpMethod": "GET", // "id": "compute.httpHealthChecks.get", @@ -81656,17 +85369,17 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -81894,17 +85607,17 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HttpHealthCheckList{ ServerResponse: googleapi.ServerResponse{ @@ -82106,17 +85819,17 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -82283,17 +85996,17 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -82452,17 +86165,17 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return 
nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -82529,8 +86242,7 @@ type HttpsHealthChecksGetCall struct { header_ http.Header } -// Get: Returns the specified HttpsHealthCheck resource. Gets a list of -// available HTTPS health checks by making a list() request. +// Get: Returns the specified HttpsHealthCheck resource. // // - httpsHealthCheck: Name of the HttpsHealthCheck resource to return. // - project: Project ID for this request. @@ -82617,17 +86329,17 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HttpsHealthCheck{ ServerResponse: googleapi.ServerResponse{ @@ -82641,7 +86353,7 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } return ret, nil // { - // "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request.", + // "description": "Returns the specified HttpsHealthCheck resource.", // "flatPath": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "httpMethod": "GET", // "id": "compute.httpsHealthChecks.get", @@ -82783,17 +86495,17 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -83021,17 +86733,17 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HttpsHealthCheckList{ ServerResponse: googleapi.ServerResponse{ @@ -83233,17 +86945,17 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -83410,17 +87122,17 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, 
gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -83582,17 +87294,17 @@ func (c *ImageFamilyViewsGetCall) Do(opts ...googleapi.CallOption) (*ImageFamily if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ImageFamilyView{ ServerResponse: googleapi.ServerResponse{ @@ -83752,17 +87464,17 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -83926,17 +87638,17 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -84006,8 +87718,7 @@ type ImagesGetCall struct { header_ http.Header } -// Get: Returns the specified image. Gets a list of available images by -// making a list() request. +// Get: Returns the specified image. // // - image: Name of the image resource to return. // - project: Project ID for this request. @@ -84094,17 +87805,17 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Image{ ServerResponse: googleapi.ServerResponse{ @@ -84118,7 +87829,7 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { } return ret, nil // { - // "description": "Returns the specified image. Gets a list of available images by making a list() request.", + // "description": "Returns the specified image.", // "flatPath": "projects/{project}/global/images/{image}", // "httpMethod": "GET", // "id": "compute.images.get", @@ -84168,10 +87879,12 @@ type ImagesGetFromFamilyCall struct { } // GetFromFamily: Returns the latest image that is part of an image -// family and is not deprecated. +// family and is not deprecated. For more information on image families, +// see Public image families documentation. // -// - family: Name of the image family to search for. -// - project: Project ID for this request. +// - family: Name of the image family to search for. +// - project: The image project that the image belongs to. For example, +// to get a CentOS image, specify centos-cloud as the image project. 
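Illustrative sketch, not part of the vendored diff: it ties together the two changes in the hunks above — Do methods now pass errors through gensupport.WrapError, and images.getFromFamily documents that its project argument is the image project (for example centos-cloud). The image family name below is hypothetical, and the only behavioural assumption is that the wrapped error still surfaces the familiar *googleapi.Error via errors.As.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Per the updated doc comment, the first argument is the *image* project,
	// e.g. centos-cloud; "my-image-family" is a hypothetical family name.
	img, err := svc.Images.GetFromFamily("centos-cloud", "my-image-family").Context(ctx).Do()
	if err != nil {
		// Errors now go through gensupport.WrapError, but the status code and
		// headers should still be reachable as a *googleapi.Error.
		var gerr *googleapi.Error
		if errors.As(err, &gerr) {
			log.Fatalf("compute API error %d: %s", gerr.Code, gerr.Message)
		}
		log.Fatal(err)
	}
	fmt.Println("latest non-deprecated image:", img.Name)
}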
func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -84255,17 +87968,17 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Image{ ServerResponse: googleapi.ServerResponse{ @@ -84279,7 +87992,7 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro } return ret, nil // { - // "description": "Returns the latest image that is part of an image family and is not deprecated.", + // "description": "Returns the latest image that is part of an image family and is not deprecated. For more information on image families, see Public image families documentation.", // "flatPath": "projects/{project}/global/images/family/{family}", // "httpMethod": "GET", // "id": "compute.images.getFromFamily", @@ -84296,7 +88009,7 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "The image project that the image belongs to. For example, to get a CentOS image, specify centos-cloud as the image project.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, @@ -84423,17 +88136,17 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -84602,17 +88315,17 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -84853,17 +88566,17 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ImageList{ ServerResponse: googleapi.ServerResponse{ @@ -85065,17 +88778,17 @@ func (c *ImagesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, 
gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -85226,17 +88939,17 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -85382,17 +89095,17 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -85538,17 +89251,17 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -85629,10 +89342,10 @@ type InstanceGroupManagersAbandonInstancesCall struct { // elapsed before the VM instance is removed or deleted. You can specify // a maximum of 1000 instances with this method per request. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
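A hedged sketch of the abandonInstances call documented above, again not part of the vendored diff. It reuses the ctx and authenticated *compute.Service from the earlier sketch; the project, zone, group name, and instance URL are hypothetical, and the Instances field name on the request type is assumed from the generated request struct.

// abandonOne removes one VM from the managed instance group without deleting
// it; per the doc comment above, the group's targetSize is decreased.
func abandonOne(ctx context.Context, svc *compute.Service) error {
	req := &compute.InstanceGroupManagersAbandonInstancesRequest{
		// Partial instance URLs are accepted; this one is hypothetical.
		Instances: []string{"zones/us-east1-b/instances/example-vm-0"},
	}
	op, err := svc.InstanceGroupManagers.
		AbandonInstances("my-project", "us-east1-b", "example-mig", req).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	// The returned Operation can be polled until it reports DONE.
	fmt.Println("abandon operation:", op.Name, op.Status)
	return nil
}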
func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -85727,17 +89440,17 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -85993,17 +89706,17 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupManagerAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -86117,11 +89830,11 @@ type InstanceGroupManagersApplyUpdatesToInstancesCall struct { // managed instance group. This method can be used to apply new // overrides and/or new versions. // -// - instanceGroupManager: The name of the managed instance group, -// should conform to RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. Should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group, +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. Should conform to RFC1035. func (r *InstanceGroupManagersService) ApplyUpdatesToInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest) *InstanceGroupManagersApplyUpdatesToInstancesCall { c := &InstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86200,17 +89913,17 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi. if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -86289,11 +90002,11 @@ type InstanceGroupManagersCreateInstancesCall struct { // actions take additional time. You must separately verify the status // of the creating or actions with the listmanagedinstances method. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. 
+// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. func (r *InstanceGroupManagersService) CreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagerscreateinstancesrequest *InstanceGroupManagersCreateInstancesRequest) *InstanceGroupManagersCreateInstancesCall { c := &InstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86387,17 +90100,17 @@ func (c *InstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -86478,11 +90191,11 @@ type InstanceGroupManagersDeleteCall struct { // to a backend service. Read Deleting an instance group for more // information. // -// - instanceGroupManager: The name of the managed instance group to -// delete. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group to +// delete. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86571,17 +90284,17 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -86668,10 +90381,10 @@ type InstanceGroupManagersDeleteInstancesCall struct { // VM instance is removed or deleted. You can specify a maximum of 1000 // instances with this method per request. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86766,17 +90479,17 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -86856,11 +90569,11 @@ type InstanceGroupManagersDeletePerInstanceConfigsCall struct { // DeletePerInstanceConfigs: Deletes selected per-instance // configurations for the managed instance group. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. func (r *InstanceGroupManagersService) DeletePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq) *InstanceGroupManagersDeletePerInstanceConfigsCall { c := &InstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -86939,17 +90652,17 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -87022,13 +90735,12 @@ type InstanceGroupManagersGetCall struct { } // Get: Returns all of the details about the specified managed instance -// group. Gets a list of available managed instance groups by making a -// list() request. +// group. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87114,17 +90826,17 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ @@ -87138,7 +90850,7 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", + // "description": "Returns all of the details about the specified managed instance group.", // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "httpMethod": "GET", // "id": "compute.instanceGroupManagers.get", @@ -87203,9 +90915,9 @@ type InstanceGroupManagersInsertCall struct { // 1000 VM instances per group. Please contact Cloud Support if you need // an increase in this limit. // -// - project: Project ID for this request. -// - zone: The name of the zone where you want to create the managed -// instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the managed +// instance group. func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87298,17 +91010,17 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -87380,9 +91092,9 @@ type InstanceGroupManagersListCall struct { // List: Retrieves a list of managed instance groups that are contained // within the specified project and zone. // -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87548,17 +91260,17 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ @@ -87674,13 +91386,13 @@ type InstanceGroupManagersListErrorsCall struct { // given managed instance group. The filter and orderBy query parameters // are not supported. // -// - instanceGroupManager: The name of the managed instance group. It -// must be a string that meets the requirements in RFC1035, or an -// unsigned long integer: must match regexp pattern: (?:a-z -// (?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// must be a string that meets the requirements in RFC1035, or an +// unsigned long integer: must match regexp pattern: (?:a-z +// (?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. func (r *InstanceGroupManagersService) ListErrors(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListErrorsCall { c := &InstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87849,17 +91561,17 @@ func (c *InstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupManagersListErrorsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -87987,10 +91699,10 @@ type InstanceGroupManagersListManagedInstancesCall struct { // supported only in the alpha and beta API and only if the group's // `listManagedInstancesResults` field is set to `PAGINATED`. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88147,17 +91859,17 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupManagersListManagedInstancesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -88279,11 +91991,11 @@ type InstanceGroupManagersListPerInstanceConfigsCall struct { // defined for the managed instance group. The orderBy query parameter // is not supported. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. func (r *InstanceGroupManagersService) ListPerInstanceConfigs(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListPerInstanceConfigsCall { c := &InstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88429,7 +92141,9 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) // error will be non-nil. Any non-2xx status code is an error. Response // headers are in either // *InstanceGroupManagersListPerInstanceConfigsResp.ServerResponse.Header -// or (if a response was returned at all) in +// +// or (if a response was returned at all) in +// // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was // returned. @@ -88440,17 +92154,17 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupManagersListPerInstanceConfigsResp{ ServerResponse: googleapi.ServerResponse{ @@ -88581,10 +92295,10 @@ type InstanceGroupManagersPatchCall struct { // state of that VM. To learn how to apply an updated configuration to // the VMs in a MIG, see Updating instances in a MIG. // -// - instanceGroupManager: The name of the instance group manager. -// - project: Project ID for this request. -// - zone: The name of the zone where you want to create the managed -// instance group. +// - instanceGroupManager: The name of the instance group manager. +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the managed +// instance group. 
func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88679,17 +92393,17 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -88771,11 +92485,11 @@ type InstanceGroupManagersPatchPerInstanceConfigsCall struct { // serves as a key used to distinguish whether to perform insert or // patch. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. func (r *InstanceGroupManagersService) PatchPerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagerspatchperinstanceconfigsreq *InstanceGroupManagersPatchPerInstanceConfigsReq) *InstanceGroupManagersPatchPerInstanceConfigsCall { c := &InstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -88870,17 +92584,17 @@ func (c *InstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...googleapi. if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -88969,10 +92683,10 @@ type InstanceGroupManagersRecreateInstancesCall struct { // before the VM instance is removed or deleted. You can specify a // maximum of 1000 instances with this method per request. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89067,17 +92781,17 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -89170,14 +92884,14 @@ type InstanceGroupManagersResizeCall struct { // seconds after the connection draining duration has elapsed before the // VM instance is removed or deleted. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - size: The number of running instances that the managed instance -// group should maintain at any given time. The group automatically -// adds or removes instances to maintain the number of instances -// specified by this parameter. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - size: The number of running instances that the managed instance +// group should maintain at any given time. The group automatically +// adds or removes instances to maintain the number of instances +// specified by this parameter. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89267,17 +92981,17 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -89365,10 +93079,10 @@ type InstanceGroupManagersSetInstanceTemplateCall struct { // recreateInstances, run applyUpdatesToInstances, or set the group's // updatePolicy.type to PROACTIVE. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. 
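To make the rolling-update note above concrete, here is a hedged sketch (not part of the vendored diff) of pointing a MIG at a new template. It assumes the same ctx and *compute.Service as the earlier sketches; the project, zone, group, and template names are hypothetical, and the InstanceTemplate field name is assumed from the generated request type.

// setNewTemplate changes the template used for future VMs in the group.
// Per the doc comment above, existing VMs keep the old template until they
// are recreated (recreateInstances / applyUpdatesToInstances) or the group's
// updatePolicy.type is set to PROACTIVE.
func setNewTemplate(ctx context.Context, svc *compute.Service) error {
	req := &compute.InstanceGroupManagersSetInstanceTemplateRequest{
		// Assumed field name; a full or partial template URL is accepted.
		InstanceTemplate: "global/instanceTemplates/example-template-v2",
	}
	op, err := svc.InstanceGroupManagers.
		SetInstanceTemplate("my-project", "us-east1-b", "example-mig", req).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("setInstanceTemplate operation:", op.Name)
	return nil
}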
func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89463,17 +93177,17 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -89558,10 +93272,10 @@ type InstanceGroupManagersSetTargetPoolsCall struct { // change might take some time to apply to all of the instances in the // group depending on the size of the group. // -// - instanceGroupManager: The name of the managed instance group. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. +// - instanceGroupManager: The name of the managed instance group. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89656,17 +93370,17 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -89748,11 +93462,11 @@ type InstanceGroupManagersUpdatePerInstanceConfigsCall struct { // serves as a key used to distinguish whether to perform insert or // patch. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the managed instance group is -// located. It should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the managed instance group is +// located. It should conform to RFC1035. 
func (r *InstanceGroupManagersService) UpdatePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq) *InstanceGroupManagersUpdatePerInstanceConfigsCall { c := &InstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -89847,17 +93561,17 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -89938,10 +93652,10 @@ type InstanceGroupsAddInstancesCall struct { // group. All of the instances in the instance group must be in the same // network/subnetwork. Read Adding instances for more information. // -// - instanceGroup: The name of the instance group where you are adding -// instances. -// - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. +// - instanceGroup: The name of the instance group where you are adding +// instances. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90036,17 +93750,17 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -90301,17 +94015,17 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -90516,17 +94230,17 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -90693,17 +94407,17 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup if res.Body 
!= nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ @@ -90775,9 +94489,9 @@ type InstanceGroupsInsertCall struct { // Insert: Creates an instance group in the specified project using the // parameters that are included in the request. // -// - project: Project ID for this request. -// - zone: The name of the zone where you want to create the instance -// group. +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the instance +// group. func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -90870,17 +94584,17 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -91120,17 +94834,17 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupList{ ServerResponse: googleapi.ServerResponse{ @@ -91247,10 +94961,10 @@ type InstanceGroupsListInstancesCall struct { // parameter is supported, but only for expressions that use `eq` // (equal) or `ne` (not equal) operators. // -// - instanceGroup: The name of the instance group from which you want -// to generate a list of included instances. -// - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. +// - instanceGroup: The name of the instance group from which you want +// to generate a list of included instances. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. 
func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91411,17 +95125,17 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ @@ -91549,10 +95263,10 @@ type InstanceGroupsRemoveInstancesCall struct { // can take up to 60 seconds after the connection draining duration // before the VM instance is removed or deleted. // -// - instanceGroup: The name of the instance group where the specified -// instances will be removed. -// - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. +// - instanceGroup: The name of the instance group where the specified +// instances will be removed. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91647,17 +95361,17 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -91736,10 +95450,10 @@ type InstanceGroupsSetNamedPortsCall struct { // SetNamedPorts: Sets the named ports for the specified instance group. // -// - instanceGroup: The name of the instance group where the named ports -// are updated. -// - project: Project ID for this request. -// - zone: The name of the zone where the instance group is located. +// - instanceGroup: The name of the instance group where the named ports +// are updated. +// - project: Project ID for this request. +// - zone: The name of the zone where the instance group is located. 
func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -91834,17 +95548,17 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -91908,6 +95622,304 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } +// method id "compute.instanceTemplates.aggregatedList": + +type InstanceTemplatesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all InstanceTemplates +// resources, regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. +func (r *InstanceTemplatesService) AggregatedList(project string) *InstanceTemplatesAggregatedListCall { + c := &InstanceTemplatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. 
For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InstanceTemplatesAggregatedListCall) Filter(filter string) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *InstanceTemplatesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstanceTemplatesAggregatedListCall) MaxResults(maxResults int64) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *InstanceTemplatesAggregatedListCall) OrderBy(orderBy string) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
+func (c *InstanceTemplatesAggregatedListCall) PageToken(pageToken string) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *InstanceTemplatesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceTemplatesAggregatedListCall) Fields(s ...googleapi.Field) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceTemplatesAggregatedListCall) IfNoneMatch(entityTag string) *InstanceTemplatesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceTemplatesAggregatedListCall) Context(ctx context.Context) *InstanceTemplatesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceTemplatesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/instanceTemplates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceTemplates.aggregatedList" call. +// Exactly one of *InstanceTemplateAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InstanceTemplateAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &InstanceTemplateAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all InstanceTemplates resources, regional and global, available to the specified project.", + // "flatPath": "projects/{project}/aggregated/instanceTemplates", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/instanceTemplates", + // "response": { + // "$ref": "InstanceTemplateAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
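The Pages helper defined just below drives the new compute.instanceTemplates.aggregatedList call. A minimal usage sketch follows; it is an editor's illustration, not part of the vendored file, and it assumes the AggregatedList(project) constructor defined earlier in this file plus the usual aggregated-list response shape (Items keyed by scope, each entry holding an InstanceTemplates slice).

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	project := "my-project" // placeholder project ID

	svc, err := compute.NewService(ctx) // authenticates with Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	call := svc.InstanceTemplates.AggregatedList(project).ReturnPartialSuccess(true)
	// Pages follows nextPageToken until the last page, or until the callback returns an error.
	err = call.Pages(ctx, func(page *compute.InstanceTemplateAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, tmpl := range scoped.InstanceTemplates {
				fmt.Printf("%s: %s\n", scope, tmpl.Name)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}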
+func (c *InstanceTemplatesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceTemplateAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.instanceTemplates.delete": type InstanceTemplatesDeleteCall struct { @@ -92011,17 +96023,17 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -92088,8 +96100,7 @@ type InstanceTemplatesGetCall struct { header_ http.Header } -// Get: Returns the specified instance template. Gets a list of -// available instance templates by making a list() request. +// Get: Returns the specified instance template. // // - instanceTemplate: The name of the instance template. // - project: Project ID for this request. @@ -92176,17 +96187,17 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceTemplate{ ServerResponse: googleapi.ServerResponse{ @@ -92200,7 +96211,7 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe } return ret, nil // { - // "description": "Returns the specified instance template. 
Gets a list of available instance templates by making a list() request.", + // "description": "Returns the specified instance template.", // "flatPath": "projects/{project}/global/instanceTemplates/{instanceTemplate}", // "httpMethod": "GET", // "id": "compute.instanceTemplates.get", @@ -92344,17 +96355,17 @@ func (c *InstanceTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -92519,17 +96530,17 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -92757,17 +96768,17 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceTemplateList{ ServerResponse: googleapi.ServerResponse{ @@ -92952,17 +96963,17 @@ func (c *InstanceTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*P if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -93108,17 +97119,17 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -93188,11 +97199,11 @@ type InstancesAddAccessConfigCall struct { // AddAccessConfig: Adds an access config to an instance's network // interface. // -// - instance: The instance name for this request. -// - networkInterface: The name of the network interface to add to this -// instance. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - instance: The instance name for this request. +// - networkInterface: The name of the network interface to add to this +// instance. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
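The hunks above route every non-2xx and 304 response through gensupport.WrapError before it is returned. A short sketch (not part of the vendored file) of the inspection patterns these doc comments recommend: googleapi.IsNotModified for 304 responses, and errors.As to recover the *googleapi.Error details from a possibly wrapped error.

package example

import (
	"errors"
	"log"

	"google.golang.org/api/googleapi"
)

// describeComputeError classifies an error returned by a generated Do method.
func describeComputeError(err error) {
	if err == nil {
		return
	}
	if googleapi.IsNotModified(err) {
		// Returned when IfNoneMatch was set and the resource's ETag still matches.
		log.Println("resource not modified since the supplied ETag")
		return
	}
	var gerr *googleapi.Error
	if errors.As(err, &gerr) {
		log.Printf("API error: code=%d message=%q", gerr.Code, gerr.Message)
		return
	}
	log.Printf("transport or client-side error: %v", err)
}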
func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -93288,17 +97299,17 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -93485,17 +97496,17 @@ func (c *InstancesAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -93754,17 +97765,17 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -93985,17 +97996,17 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -94176,17 +98187,17 @@ func (c *InstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -94350,17 +98361,17 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -94533,17 +98544,17 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != 
nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -94634,12 +98645,12 @@ type InstancesDetachDiskCall struct { // DetachDisk: Detaches a disk from an instance. // -// - deviceName: The device name of the disk to detach. Make a get() -// request on the instance to view currently attached disks and device -// names. -// - instance: Instance name for this request. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - deviceName: The device name of the disk to detach. Make a get() +// request on the instance to view currently attached disks and device +// names. +// - instance: Instance name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -94729,17 +98740,17 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -94822,8 +98833,7 @@ type InstancesGetCall struct { header_ http.Header } -// Get: Returns the specified Instance resource. Gets a list of -// available instances by making a list() request. +// Get: Returns the specified Instance resource. // // - instance: Name of the instance resource to return. // - project: Project ID for this request. @@ -94913,17 +98923,17 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Instance{ ServerResponse: googleapi.ServerResponse{ @@ -94937,7 +98947,7 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } return ret, nil // { - // "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request.", + // "description": "Returns the specified Instance resource.", // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}", // "httpMethod": "GET", // "id": "compute.instances.get", @@ -94998,11 +99008,11 @@ type InstancesGetEffectiveFirewallsCall struct { // GetEffectiveFirewalls: Returns effective firewalls applied to an // interface of the instance. // -// - instance: Name of the instance scoping this request. -// - networkInterface: The name of the network interface to get the -// effective firewalls. -// - project: Project ID for this request. 
-// - zone: The name of the zone for this request. +// - instance: Name of the instance scoping this request. +// - networkInterface: The name of the network interface to get the +// effective firewalls. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) GetEffectiveFirewalls(project string, zone string, instance string, networkInterface string) *InstancesGetEffectiveFirewallsCall { c := &InstancesGetEffectiveFirewallsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -95090,17 +99100,17 @@ func (c *InstancesGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstancesGetEffectiveFirewallsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -95283,17 +99293,17 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &GuestAttributes{ ServerResponse: googleapi.ServerResponse{ @@ -95473,17 +99483,17 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -95651,17 +99661,17 @@ func (c *InstancesGetScreenshotCall) Do(opts ...googleapi.CallOption) (*Screensh if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Screenshot{ ServerResponse: googleapi.ServerResponse{ @@ -95849,17 +99859,17 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SerialPortOutput{ ServerResponse: googleapi.ServerResponse{ @@ -96037,17 +100047,17 @@ func (c *InstancesGetShieldedInstanceIdentityCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer 
googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ShieldedInstanceIdentity{ ServerResponse: googleapi.ServerResponse{ @@ -96241,17 +100251,17 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -96501,17 +100511,17 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceList{ ServerResponse: googleapi.ServerResponse{ @@ -96630,10 +100640,10 @@ type InstancesListReferrersCall struct { // includes the instance group. For more information, read Viewing // referrers to VM instances. // -// - instance: Name of the target instance scoping this request, or '-' -// if the request should span over all instances in the container. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - instance: Name of the target instance scoping this request, or '-' +// if the request should span over all instances in the container. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
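The filter grammar documented at length in the aggregatedList metadata above applies to the per-zone list calls as well. A brief illustrative sketch (not part of the vendored file) using Instances.List with a simple filter and the generated Pages helper:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// runningInstanceNames collects the names of RUNNING instances in one zone, letting the
// generated Pages helper follow nextPageToken across pages.
func runningInstanceNames(ctx context.Context, project, zone string) ([]string, error) {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return nil, err
	}
	var names []string
	err = svc.Instances.List(project, zone).
		Filter("status = RUNNING"). // filter expression syntax is described in the metadata above
		Pages(ctx, func(page *compute.InstanceList) error {
			for _, inst := range page.Items {
				names = append(names, inst.Name)
			}
			return nil
		})
	return names, err
}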
func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -96801,17 +100811,17 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceListReferrers{ ServerResponse: googleapi.ServerResponse{ @@ -97031,17 +101041,17 @@ func (c *InstancesRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -97214,17 +101224,17 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -97393,17 +101403,17 @@ func (c *InstancesResumeCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -97550,7 +101560,7 @@ func (c *InstancesSendDiagnosticInterruptCall) Do(opts ...googleapi.CallOption) } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -97707,17 +101717,17 @@ func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -97801,14 +101811,14 @@ type InstancesSetDiskAutoDeleteCall struct { // SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to // an instance. // -// - autoDelete: Whether to auto-delete the disk when the instance is -// deleted. -// - deviceName: The device name of the disk to modify. Make a get() -// request on the instance to view currently attached disks and device -// names. 
-// - instance: The instance name for this request. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - autoDelete: Whether to auto-delete the disk when the instance is +// deleted. +// - deviceName: The device name of the disk to modify. Make a get() +// request on the instance to view currently attached disks and device +// names. +// - instance: The instance name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -97899,17 +101909,17 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -98084,17 +102094,17 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -98268,17 +102278,17 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -98457,17 +102467,17 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -98646,17 +102656,17 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -98835,17 +102845,17 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { 
res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -99026,17 +103036,17 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -99102,6 +103112,194 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper } +// method id "compute.instances.setName": + +type InstancesSetNameCall struct { + s *Service + project string + zone string + instance string + instancessetnamerequest *InstancesSetNameRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetName: Sets name of an instance. +// +// - instance: The instance name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *InstancesService) SetName(project string, zone string, instance string, instancessetnamerequest *InstancesSetNameRequest) *InstancesSetNameCall { + c := &InstancesSetNameCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.instancessetnamerequest = instancessetnamerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesSetNameCall) RequestId(requestId string) *InstancesSetNameCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetNameCall) Fields(s ...googleapi.Field) *InstancesSetNameCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSetNameCall) Context(ctx context.Context) *InstancesSetNameCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
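A sketch (not part of the vendored file) of how the new compute.instances.setName call defined in this hunk might be used. The Name and CurrentName field names on InstancesSetNameRequest are assumed from the REST API description and should be checked against the generated struct.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// renameInstance asks Compute Engine to rename an instance and returns the zonal
// Operation, which the caller should poll to completion.
func renameInstance(ctx context.Context, project, zone, oldName, newName string) (*compute.Operation, error) {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return nil, err
	}
	req := &compute.InstancesSetNameRequest{
		CurrentName: oldName, // guards against renaming a different instance
		Name:        newName,
	}
	// RequestId(...) could also be set (see the setter above) to make retries idempotent.
	return svc.Instances.SetName(project, zone, oldName, req).Context(ctx).Do()
}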
+func (c *InstancesSetNameCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSetNameCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetnamerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setName") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.setName" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetNameCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets name of an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setName", + // "httpMethod": "POST", + // "id": "compute.instances.setName", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setName", + // "request": { + // "$ref": "InstancesSetNameRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.setScheduling": type InstancesSetSchedulingCall struct { @@ -99219,17 +103417,17 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -99409,17 +103607,17 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -99600,17 +103798,17 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -99789,17 +103987,17 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -99955,17 +104153,17 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := 
googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -100129,17 +104327,17 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -100316,17 +104514,17 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -100422,6 +104620,14 @@ func (r *InstancesService) Stop(project string, zone string, instance string) *I return c } +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If +// true, discard the contents of any attached localSSD partitions. +// Default value is false. +func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { + c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) + return c +} + // RequestId sets the optional parameter "requestId": An optional // request ID to identify requests. Specify a unique request ID so that // if you must retry your request, the server will know to ignore the @@ -100502,17 +104708,17 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -100536,6 +104742,11 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "instance" // ], // "parameters": { + // "discardLocalSsd": { + // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "location": "query", + // "type": "boolean" + // }, // "instance": { // "description": "Name of the instance resource to stop.", // "location": "path", @@ -100607,6 +104818,14 @@ func (r *InstancesService) Suspend(project string, zone string, instance string) return c } +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If +// true, discard the contents of any attached localSSD partitions. +// Default value is false. +func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { + c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) + return c +} + // RequestId sets the optional parameter "requestId": An optional // request ID to identify requests. 
Specify a unique request ID so that // if you must retry your request, the server will know to ignore the @@ -100687,17 +104906,17 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -100721,6 +104940,11 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err // "instance" // ], // "parameters": { + // "discardLocalSsd": { + // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "location": "query", + // "type": "boolean" + // }, // "instance": { // "description": "Name of the instance resource to suspend.", // "location": "path", @@ -100857,17 +105081,17 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -100965,10 +105189,11 @@ func (r *InstancesService) Update(project string, zone string, instance string, // acts based on the minimum action that the updated properties require. // // Possible values: -// "INVALID" -// "NO_EFFECT" - No changes can be made to the instance. -// "REFRESH" - The instance will not restart. -// "RESTART" - The instance will restart. +// +// "INVALID" +// "NO_EFFECT" - No changes can be made to the instance. +// "REFRESH" - The instance will not restart. +// "RESTART" - The instance will restart. func (c *InstancesUpdateCall) MinimalAction(minimalAction string) *InstancesUpdateCall { c.urlParams_.Set("minimalAction", minimalAction) return c @@ -100982,10 +105207,11 @@ func (c *InstancesUpdateCall) MinimalAction(minimalAction string) *InstancesUpda // lowest to highest are NO_EFFECT, REFRESH, and RESTART. // // Possible values: -// "INVALID" -// "NO_EFFECT" - No changes can be made to the instance. -// "REFRESH" - The instance will not restart. -// "RESTART" - The instance will restart. +// +// "INVALID" +// "NO_EFFECT" - No changes can be made to the instance. +// "REFRESH" - The instance will not restart. +// "RESTART" - The instance will restart. 
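The DiscardLocalSsd parameter added to Stop (and, in the same hunk, to Suspend) is exposed through a new fluent setter. A short illustrative sketch, not part of the vendored file:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// stopAndDiscardLocalSSD stops an instance without preserving local SSD contents and
// returns the zonal Operation to poll.
func stopAndDiscardLocalSSD(ctx context.Context, project, zone, instance string) (*compute.Operation, error) {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return nil, err
	}
	return svc.Instances.Stop(project, zone, instance).
		DiscardLocalSsd(true). // default is false: local SSD contents are preserved
		Context(ctx).
		Do()
}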
func (c *InstancesUpdateCall) MostDisruptiveAllowedAction(mostDisruptiveAllowedAction string) *InstancesUpdateCall { c.urlParams_.Set("mostDisruptiveAllowedAction", mostDisruptiveAllowedAction) return c @@ -101076,17 +105302,17 @@ func (c *InstancesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -101204,11 +105430,11 @@ type InstancesUpdateAccessConfigCall struct { // This method supports PATCH semantics and uses the JSON merge patch // format and processing rules. // -// - instance: The instance name for this request. -// - networkInterface: The name of the network interface where the -// access config is attached. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - instance: The instance name for this request. +// - networkInterface: The name of the network interface where the +// access config is attached. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -101304,17 +105530,17 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -101502,17 +105728,17 @@ func (c *InstancesUpdateDisplayDeviceCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -101697,17 +105923,17 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -101895,17 +106121,17 @@ func (c *InstancesUpdateShieldedInstanceConfigCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if 
err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -102163,17 +106389,17 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InterconnectAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -102284,10 +106510,10 @@ type InterconnectAttachmentsDeleteCall struct { // Delete: Deletes the specified interconnect attachment. // -// - interconnectAttachment: Name of the interconnect attachment to -// delete. -// - project: Project ID for this request. -// - region: Name of the region for this request. +// - interconnectAttachment: Name of the interconnect attachment to +// delete. +// - project: Project ID for this request. +// - region: Name of the region for this request. func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -102376,17 +106602,17 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -102464,10 +106690,10 @@ type InterconnectAttachmentsGetCall struct { // Get: Returns the specified interconnect attachment. // -// - interconnectAttachment: Name of the interconnect attachment to -// return. -// - project: Project ID for this request. -// - region: Name of the region for this request. +// - interconnectAttachment: Name of the interconnect attachment to +// return. +// - project: Project ID for this request. +// - region: Name of the region for this request. 
func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -102553,17 +106779,17 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InterconnectAttachment{ ServerResponse: googleapi.ServerResponse{ @@ -102738,17 +106964,17 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -102993,17 +107219,17 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InterconnectAttachmentList{ ServerResponse: googleapi.ServerResponse{ @@ -103120,10 +107346,10 @@ type InterconnectAttachmentsPatchCall struct { // included in the request. This method supports PATCH semantics and // uses the JSON merge patch format and processing rules. // -// - interconnectAttachment: Name of the interconnect attachment to -// patch. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - interconnectAttachment: Name of the interconnect attachment to +// patch. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *InterconnectAttachmentsService) Patch(project string, region string, interconnectAttachment string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsPatchCall { c := &InterconnectAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -103218,17 +107444,17 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -103294,6 +107520,195 @@ func (c *InterconnectAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Op } +// method id "compute.interconnectAttachments.setLabels": + +type InterconnectAttachmentsSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an InterconnectAttachment. To learn +// more about labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. +func (r *InterconnectAttachmentsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *InterconnectAttachmentsSetLabelsCall { + c := &InterconnectAttachmentsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InterconnectAttachmentsSetLabelsCall) RequestId(requestId string) *InterconnectAttachmentsSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectAttachmentsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InterconnectAttachmentsSetLabelsCall) Context(ctx context.Context) *InterconnectAttachmentsSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectAttachmentsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an InterconnectAttachment. 
To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.interconnectAttachments.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/interconnectAttachments/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.interconnectLocations.get": type InterconnectLocationsGetCall struct { @@ -103395,17 +107810,17 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InterconnectLocation{ ServerResponse: googleapi.ServerResponse{ @@ -103634,17 +108049,17 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InterconnectLocationList{ ServerResponse: googleapi.ServerResponse{ @@ -103747,7 +108162,7 @@ type InterconnectsDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified interconnect. 
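Besides the error wrapping, the regenerated file adds compute.interconnectAttachments.setLabels above (and, further down, a global compute.interconnects.setLabels that takes a GlobalSetLabelsRequest). A sketch of driving the regional variant, assuming the regenerated InterconnectAttachment struct exposes LabelFingerprint the way other labelled compute resources do; the label values, the uuid dependency, and the helper name are illustrative:

package example

import (
	"context"

	"github.com/google/uuid"
	"google.golang.org/api/compute/v1"
)

// setAttachmentLabels sketches the newly generated
// InterconnectAttachments.SetLabels call. The current label fingerprint is
// read first because label updates are fingerprint-guarded, and a
// client-generated request ID makes the mutation safe to retry.
func setAttachmentLabels(ctx context.Context, svc *compute.Service, project, region, name string) error {
	att, err := svc.InterconnectAttachments.Get(project, region, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	req := &compute.RegionSetLabelsRequest{
		LabelFingerprint: att.LabelFingerprint,
		Labels:           map[string]string{"team": "networking"}, // illustrative labels
	}
	_, err = svc.InterconnectAttachments.
		SetLabels(project, region, name, req).
		RequestId(uuid.NewString()).
		Context(ctx).
		Do()
	return err
}

In real code the returned Operation would be polled to completion before the labels are considered applied; that step is omitted here.
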
+// Delete: Deletes the specified Interconnect. // // - interconnect: Name of the interconnect to delete. // - project: Project ID for this request. @@ -103837,17 +108252,17 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -103861,7 +108276,7 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified interconnect.", + // "description": "Deletes the specified Interconnect.", // "flatPath": "projects/{project}/global/interconnects/{interconnect}", // "httpMethod": "DELETE", // "id": "compute.interconnects.delete", @@ -103914,8 +108329,8 @@ type InterconnectsGetCall struct { header_ http.Header } -// Get: Returns the specified interconnect. Get a list of available -// interconnects by making a list() request. +// Get: Returns the specified Interconnect. Get a list of available +// Interconnects by making a list() request. // // - interconnect: Name of the interconnect to return. // - project: Project ID for this request. @@ -104002,17 +108417,17 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Interconnect{ ServerResponse: googleapi.ServerResponse{ @@ -104026,7 +108441,7 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, } return ret, nil // { - // "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", + // "description": "Returns the specified Interconnect. Get a list of available Interconnects by making a list() request.", // "flatPath": "projects/{project}/global/interconnects/{interconnect}", // "httpMethod": "GET", // "id": "compute.interconnects.get", @@ -104076,7 +108491,7 @@ type InterconnectsGetDiagnosticsCall struct { } // GetDiagnostics: Returns the interconnectDiagnostics for the specified -// interconnect. +// Interconnect. // // - interconnect: Name of the interconnect resource to query. // - project: Project ID for this request. 
@@ -104164,17 +108579,17 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InterconnectsGetDiagnosticsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -104188,7 +108603,7 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Returns the interconnectDiagnostics for the specified interconnect.", + // "description": "Returns the interconnectDiagnostics for the specified Interconnect.", // "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", // "httpMethod": "GET", // "id": "compute.interconnects.getDiagnostics", @@ -104236,7 +108651,7 @@ type InterconnectsInsertCall struct { header_ http.Header } -// Insert: Creates a Interconnect in the specified project using the +// Insert: Creates an Interconnect in the specified project using the // data included in the request. // // - project: Project ID for this request. @@ -104330,17 +108745,17 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -104354,7 +108769,7 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a Interconnect in the specified project using the data included in the request.", + // "description": "Creates an Interconnect in the specified project using the data included in the request.", // "flatPath": "projects/{project}/global/interconnects", // "httpMethod": "POST", // "id": "compute.interconnects.insert", @@ -104401,7 +108816,7 @@ type InterconnectsListCall struct { header_ http.Header } -// List: Retrieves the list of interconnect available to the specified +// List: Retrieves the list of Interconnects available to the specified // project. // // - project: Project ID for this request. 
@@ -104568,17 +108983,17 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InterconnectList{ ServerResponse: googleapi.ServerResponse{ @@ -104592,7 +109007,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL } return ret, nil // { - // "description": "Retrieves the list of interconnect available to the specified project.", + // "description": "Retrieves the list of Interconnects available to the specified project.", // "flatPath": "projects/{project}/global/interconnects", // "httpMethod": "GET", // "id": "compute.interconnects.list", @@ -104682,7 +109097,7 @@ type InterconnectsPatchCall struct { header_ http.Header } -// Patch: Updates the specified interconnect with the data included in +// Patch: Updates the specified Interconnect with the data included in // the request. This method supports PATCH semantics and uses the JSON // merge patch format and processing rules. // @@ -104780,17 +109195,17 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -104804,7 +109219,7 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified Interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "flatPath": "projects/{project}/global/interconnects/{interconnect}", // "httpMethod": "PATCH", // "id": "compute.interconnects.patch", @@ -104848,6 +109263,162 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } +// method id "compute.interconnects.setLabels": + +type InterconnectsSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an Interconnect. To learn more about +// labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *InterconnectsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *InterconnectsSetLabelsCall { + c := &InterconnectsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsSetLabelsCall) Fields(s ...googleapi.Field) *InterconnectsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsSetLabelsCall) Context(ctx context.Context) *InterconnectsSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an Interconnect. 
To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/global/interconnects/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.interconnects.setLabels", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/interconnects/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.licenseCodes.get": type LicenseCodesGetCall struct { @@ -104865,9 +109436,9 @@ type LicenseCodesGetCall struct { // *Caution* This resource is intended for use only by third-party // partners who are creating Cloud Marketplace images. // -// - licenseCode: Number corresponding to the License code resource to -// return. -// - project: Project ID for this request. +// - licenseCode: Number corresponding to the License code resource to +// return. +// - project: Project ID for this request. func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -104951,17 +109522,17 @@ func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &LicenseCode{ ServerResponse: googleapi.ServerResponse{ @@ -105106,17 +109677,17 @@ func (c *LicenseCodesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -105273,17 +109844,17 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -105439,17 +110010,17 @@ func (c *LicensesGetCall) Do(opts 
...googleapi.CallOption) (*License, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &License{ ServerResponse: googleapi.ServerResponse{ @@ -105609,17 +110180,17 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -105782,17 +110353,17 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -106029,17 +110600,17 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &LicensesListResponse{ ServerResponse: googleapi.ServerResponse{ @@ -106226,17 +110797,17 @@ func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -106383,17 +110954,17 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -106549,17 +111120,17 @@ func (c *MachineImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, 
gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -106626,8 +111197,7 @@ type MachineImagesGetCall struct { header_ http.Header } -// Get: Returns the specified machine image. Gets a list of available -// machine images by making a list() request. +// Get: Returns the specified machine image. // // - machineImage: The name of the machine image. // - project: Project ID for this request. @@ -106714,17 +111284,17 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &MachineImage{ ServerResponse: googleapi.ServerResponse{ @@ -106738,7 +111308,7 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, } return ret, nil // { - // "description": "Returns the specified machine image. Gets a list of available machine images by making a list() request.", + // "description": "Returns the specified machine image.", // "flatPath": "projects/{project}/global/machineImages/{machineImage}", // "httpMethod": "GET", // "id": "compute.machineImages.get", @@ -106882,17 +111452,17 @@ func (c *MachineImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -107065,17 +111635,17 @@ func (c *MachineImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -107308,17 +111878,17 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &MachineImageList{ ServerResponse: googleapi.ServerResponse{ @@ -107503,17 +112073,17 @@ func (c *MachineImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -107659,17 +112229,17 @@ func (c 
*MachineImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -107913,17 +112483,17 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &MachineTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -108033,8 +112603,7 @@ type MachineTypesGetCall struct { header_ http.Header } -// Get: Returns the specified machine type. Gets a list of available -// machine types by making a list() request. +// Get: Returns the specified machine type. // // - machineType: Name of the machine type to return. // - project: Project ID for this request. @@ -108124,17 +112693,17 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &MachineType{ ServerResponse: googleapi.ServerResponse{ @@ -108148,7 +112717,7 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er } return ret, nil // { - // "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request.", + // "description": "Returns the specified machine type.", // "flatPath": "projects/{project}/zones/{zone}/machineTypes/{machineType}", // "httpMethod": "GET", // "id": "compute.machineTypes.get", @@ -108375,17 +112944,17 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &MachineTypeList{ ServerResponse: googleapi.ServerResponse{ @@ -108485,9 +113054,9 @@ func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeLis } } -// method id "compute.networkEdgeSecurityServices.aggregatedList": +// method id "compute.networkAttachments.aggregatedList": -type NetworkEdgeSecurityServicesAggregatedListCall struct { +type NetworkAttachmentsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -108496,12 +113065,12 @@ type NetworkEdgeSecurityServicesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves the list of all NetworkEdgeSecurityService -// resources available to the specified project. 
+// AggregatedList: Retrieves the list of all NetworkAttachment +// resources, regional and global, available to the specified project. // -// - project: Name of the project scoping this request. -func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *NetworkEdgeSecurityServicesAggregatedListCall { - c := &NetworkEdgeSecurityServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +func (r *NetworkAttachmentsService) AggregatedList(project string) *NetworkAttachmentsAggregatedListCall { + c := &NetworkAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -108541,7 +113110,7 @@ func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *Net // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne // .*instance`. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) Filter(filter string) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) Filter(filter string) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -108554,7 +113123,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Filter(filter string) *N // response. For resource types which predate this field, if this flag // is omitted or false, only scopes of the scope types where the // resource type is expected to be found will be included. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) return c } @@ -108565,7 +113134,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) IncludeAllScopes(include // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *NetworkEdgeSecurityServicesAggregatedListCall) MaxResults(maxResults int64) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) MaxResults(maxResults int64) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -108579,7 +113148,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) MaxResults(maxResults in // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) OrderBy(orderBy string) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) OrderBy(orderBy string) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -108587,7 +113156,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) OrderBy(orderBy string) // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. 
-func (c *NetworkEdgeSecurityServicesAggregatedListCall) PageToken(pageToken string) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) PageToken(pageToken string) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -108596,7 +113165,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) PageToken(pageToken stri // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -108604,7 +113173,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) ReturnPartialSuccess(ret // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108614,7 +113183,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Fields(s ...googleapi.Fi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkAttachmentsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -108622,21 +113191,21 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) IfNoneMatch(entityTag st // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesAggregatedListCall { +func (c *NetworkAttachmentsAggregatedListCall) Context(ctx context.Context) *NetworkAttachmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Header() http.Header { +func (c *NetworkAttachmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -108649,7 +113218,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (* var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEdgeSecurityServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkAttachments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -108662,35 +113231,33 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (* return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEdgeSecurityServices.aggregatedList" call. -// Exactly one of *NetworkEdgeSecurityServiceAggregatedList or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *NetworkEdgeSecurityServiceAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityServiceAggregatedList, error) { +// Do executes the "compute.networkAttachments.aggregatedList" call. +// Exactly one of *NetworkAttachmentAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *NetworkAttachmentAggregatedList.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkAttachmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworkEdgeSecurityServiceAggregatedList{ + ret := &NetworkAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -108702,10 +113269,10 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Retrieves the list of all NetworkEdgeSecurityService resources available to the specified project.", - // "flatPath": "projects/{project}/aggregated/networkEdgeSecurityServices", + // "description": "Retrieves the list of all NetworkAttachment resources, regional and global, available to the specified project.", + // "flatPath": "projects/{project}/aggregated/networkAttachments", // "httpMethod": "GET", - // "id": "compute.networkEdgeSecurityServices.aggregatedList", + // "id": "compute.networkAttachments.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -108739,7 +113306,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal // "type": "string" // }, // "project": { - // "description": "Name of the project scoping this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, @@ -108751,9 +113318,9 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal // "type": "boolean" // } // }, - // "path": "projects/{project}/aggregated/networkEdgeSecurityServices", + // "path": "projects/{project}/aggregated/networkAttachments", // "response": { - // "$ref": "NetworkEdgeSecurityServiceAggregatedList" + // "$ref": "NetworkAttachmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -108767,7 +113334,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
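The hunks above also generate the compute.networkAttachments.aggregatedList call, with the usual option setters (Filter, MaxResults, OrderBy, PageToken, ReturnPartialSuccess) and a Pages helper that follows nextPageToken automatically. A sketch of iterating the aggregated results, assuming Items follows the standard compute aggregated-list shape (a map keyed by scope whose values carry a NetworkAttachments slice); the filter expression and printed fields are illustrative:

package example

import (
	"context"
	"fmt"

	"google.golang.org/api/compute/v1"
)

// listNetworkAttachments sketches paging through the new aggregatedList
// method. Pages handles nextPageToken internally; the filter follows the
// documented "name ne <regexp>" form.
func listNetworkAttachments(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.NetworkAttachments.AggregatedList(project).
		Filter("name ne .*-test").
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.NetworkAttachmentAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, att := range scoped.NetworkAttachments {
				fmt.Printf("%s: %s\n", scope, att.Name)
			}
		}
		return nil
	})
}
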
-func (c *NetworkEdgeSecurityServicesAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEdgeSecurityServiceAggregatedList) error) error { +func (c *NetworkAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkAttachmentAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -108785,29 +113352,29 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Pages(ctx context.Contex } } -// method id "compute.networkEdgeSecurityServices.delete": +// method id "compute.networkAttachments.delete": -type NetworkEdgeSecurityServicesDeleteCall struct { - s *Service - project string - region string - networkEdgeSecurityService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkAttachmentsDeleteCall struct { + s *Service + project string + region string + networkAttachment string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified service. +// Delete: Deletes the specified NetworkAttachment in the given scope // -// - networkEdgeSecurityService: Name of the network edge security -// service to delete. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *NetworkEdgeSecurityServicesService) Delete(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesDeleteCall { - c := &NetworkEdgeSecurityServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkAttachment: Name of the NetworkAttachment resource to +// delete. +// - project: Project ID for this request. +// - region: Name of the region of this request. +func (r *NetworkAttachmentsService) Delete(project string, region string, networkAttachment string) *NetworkAttachmentsDeleteCall { + c := &NetworkAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.networkEdgeSecurityService = networkEdgeSecurityService + c.networkAttachment = networkAttachment return c } @@ -108821,8 +113388,9 @@ func (r *NetworkEdgeSecurityServicesService) Delete(project string, region strin // received, and if so, will ignore the second request. This prevents // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *NetworkEdgeSecurityServicesDeleteCall) RequestId(requestId string) *NetworkEdgeSecurityServicesDeleteCall { +// supported ( 00000000-0000-0000-0000-000000000000). end_interface: +// MixerMutationRequestBuilder +func (c *NetworkAttachmentsDeleteCall) RequestId(requestId string) *NetworkAttachmentsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -108830,7 +113398,7 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) RequestId(requestId string) *Net // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NetworkEdgeSecurityServicesDeleteCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesDeleteCall { +func (c *NetworkAttachmentsDeleteCall) Fields(s ...googleapi.Field) *NetworkAttachmentsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -108838,21 +113406,21 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Fields(s ...googleapi.Field) *Ne // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEdgeSecurityServicesDeleteCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesDeleteCall { +func (c *NetworkAttachmentsDeleteCall) Context(ctx context.Context) *NetworkAttachmentsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEdgeSecurityServicesDeleteCall) Header() http.Header { +func (c *NetworkAttachmentsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -108862,7 +113430,7 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Res var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -108870,38 +113438,38 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "networkEdgeSecurityService": c.networkEdgeSecurityService, + "project": c.project, + "region": c.region, + "networkAttachment": c.networkAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEdgeSecurityServices.delete" call. +// Do executes the "compute.networkAttachments.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -108915,18 +113483,18 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the specified service.", - // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "description": "Deletes the specified NetworkAttachment in the given scope", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", // "httpMethod": "DELETE", - // "id": "compute.networkEdgeSecurityServices.delete", + // "id": "compute.networkAttachments.delete", // "parameterOrder": [ // "project", // "region", - // "networkEdgeSecurityService" + // "networkAttachment" // ], // "parameters": { - // "networkEdgeSecurityService": { - // "description": "Name of the network edge security service to delete.", + // "networkAttachment": { + // "description": "Name of the NetworkAttachment resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -108940,19 +113508,19 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region of this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: MixerMutationRequestBuilder", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", // "response": { // "$ref": "Operation" // }, @@ -108964,37 +113532,38 @@ func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) } -// method id "compute.networkEdgeSecurityServices.get": +// method id "compute.networkAttachments.get": -type NetworkEdgeSecurityServicesGetCall struct { - s *Service - project string - region string - networkEdgeSecurityService string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworkAttachmentsGetCall struct { + s *Service + project string + region string + networkAttachment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Gets a specified NetworkEdgeSecurityService. +// Get: Returns the specified NetworkAttachment resource in the given +// scope. // -// - networkEdgeSecurityService: Name of the network edge security -// service to get. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *NetworkEdgeSecurityServicesService) Get(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesGetCall { - c := &NetworkEdgeSecurityServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - networkAttachment: Name of the NetworkAttachment resource to +// return. +// - project: Project ID for this request. +// - region: Name of the region of this request. +func (r *NetworkAttachmentsService) Get(project string, region string, networkAttachment string) *NetworkAttachmentsGetCall { + c := &NetworkAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.networkEdgeSecurityService = networkEdgeSecurityService + c.networkAttachment = networkAttachment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEdgeSecurityServicesGetCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesGetCall { +func (c *NetworkAttachmentsGetCall) Fields(s ...googleapi.Field) *NetworkAttachmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -109004,7 +113573,7 @@ func (c *NetworkEdgeSecurityServicesGetCall) Fields(s ...googleapi.Field) *Netwo // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworkEdgeSecurityServicesGetCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesGetCall { +func (c *NetworkAttachmentsGetCall) IfNoneMatch(entityTag string) *NetworkAttachmentsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -109012,21 +113581,21 @@ func (c *NetworkEdgeSecurityServicesGetCall) IfNoneMatch(entityTag string) *Netw // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *NetworkEdgeSecurityServicesGetCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesGetCall { +func (c *NetworkAttachmentsGetCall) Context(ctx context.Context) *NetworkAttachmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEdgeSecurityServicesGetCall) Header() http.Header { +func (c *NetworkAttachmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -109039,7 +113608,7 @@ func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -109047,40 +113616,40 @@ func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Respon } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "networkEdgeSecurityService": c.networkEdgeSecurityService, + "project": c.project, + "region": c.region, + "networkAttachment": c.networkAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEdgeSecurityServices.get" call. -// Exactly one of *NetworkEdgeSecurityService or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *NetworkEdgeSecurityService.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.networkAttachments.get" call. +// Exactly one of *NetworkAttachment or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NetworkAttachment.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityService, error) { +func (c *NetworkAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*NetworkAttachment, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NetworkEdgeSecurityService{ + ret := &NetworkAttachment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -109092,18 +113661,18 @@ func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Gets a specified NetworkEdgeSecurityService.", - // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "description": "Returns the specified NetworkAttachment resource in the given scope.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", // "httpMethod": "GET", - // "id": "compute.networkEdgeSecurityServices.get", + // "id": "compute.networkAttachments.get", // "parameterOrder": [ // "project", // "region", - // "networkEdgeSecurityService" + // "networkAttachment" // ], // "parameters": { - // "networkEdgeSecurityService": { - // "description": "Name of the network edge security service to get.", + // "networkAttachment": { + // "description": "Name of the NetworkAttachment resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -109117,16 +113686,16 @@ func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region of this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", // "response": { - // "$ref": "NetworkEdgeSecurityService" + // "$ref": "NetworkAttachment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -109137,135 +113706,130 @@ func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.networkEdgeSecurityServices.insert": +// method id "compute.networkAttachments.getIamPolicy": -type NetworkEdgeSecurityServicesInsertCall struct { - s *Service - project string - region string - networkedgesecurityservice *NetworkEdgeSecurityService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkAttachmentsGetIamPolicyCall struct { + s *Service + project string + region string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a new service in the specified project using the data -// included in the request. +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. // // - project: Project ID for this request. -// - region: Name of the region scoping this request. 
-func (r *NetworkEdgeSecurityServicesService) Insert(project string, region string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesInsertCall { - c := &NetworkEdgeSecurityServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *NetworkAttachmentsService) GetIamPolicy(project string, region string, resource string) *NetworkAttachmentsGetIamPolicyCall { + c := &NetworkAttachmentsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.networkedgesecurityservice = networkedgesecurityservice - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *NetworkEdgeSecurityServicesInsertCall) RequestId(requestId string) *NetworkEdgeSecurityServicesInsertCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource return c } -// ValidateOnly sets the optional parameter "validateOnly": If true, the -// request will not be committed. -func (c *NetworkEdgeSecurityServicesInsertCall) ValidateOnly(validateOnly bool) *NetworkEdgeSecurityServicesInsertCall { - c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *NetworkAttachmentsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *NetworkAttachmentsGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEdgeSecurityServicesInsertCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesInsertCall { +func (c *NetworkAttachmentsGetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkAttachmentsGetIamPolicyCall) IfNoneMatch(entityTag string) *NetworkAttachmentsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
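The regenerated Do methods above now pass errors through gensupport.WrapError instead of returning a bare *googleapi.Error. A minimal caller-side sketch, assuming only that err came from one of these Do calls: errors.As still reaches the underlying *googleapi.Error through the wrapper, so prefer it over a direct type assertion.

package computeerr

import (
	"errors"
	"net/http"

	"google.golang.org/api/googleapi"
)

// isNotFound reports whether err returned by a generated Do method is an
// HTTP 404. The regenerated code wraps the error via gensupport.WrapError,
// so errors.As (not a direct type assertion) is used to unwrap it.
func isNotFound(err error) bool {
	var gerr *googleapi.Error
	return errors.As(err, &gerr) && gerr.Code == http.StatusNotFound
}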
-func (c *NetworkEdgeSecurityServicesInsertCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesInsertCall { +func (c *NetworkAttachmentsGetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEdgeSecurityServicesInsertCall) Header() http.Header { +func (c *NetworkAttachmentsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEdgeSecurityServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEdgeSecurityServices.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkAttachments.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NetworkAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -109277,15 +113841,22 @@ func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a new service in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices", - // "httpMethod": "POST", - // "id": "compute.networkEdgeSecurityServices.insert", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.networkAttachments.getIamPolicy", // "parameterOrder": [ // "project", - // "region" + // "region", + // "resource" // ], // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -109294,70 +113865,55 @@ func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, // "type": "string" - // }, - // "validateOnly": { - // "description": "If true, the request will not be committed.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices", - // "request": { - // "$ref": "NetworkEdgeSecurityService" - // }, + // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networkEdgeSecurityServices.patch": +// method id "compute.networkAttachments.insert": -type NetworkEdgeSecurityServicesPatchCall struct { - s *Service - project string - region string - networkEdgeSecurityService string - networkedgesecurityservice *NetworkEdgeSecurityService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkAttachmentsInsertCall struct { + s *Service + project string + region string + networkattachment *NetworkAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified policy with the data included in the -// request. +// Insert: Creates a NetworkAttachment in the specified project in the +// given scope using the parameters that are included in the request. // -// - networkEdgeSecurityService: Name of the network edge security -// service to update. // - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *NetworkEdgeSecurityServicesService) Patch(project string, region string, networkEdgeSecurityService string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesPatchCall { - c := &NetworkEdgeSecurityServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region of this request. +func (r *NetworkAttachmentsService) Insert(project string, region string, networkattachment *NetworkAttachment) *NetworkAttachmentsInsertCall { + c := &NetworkAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.networkEdgeSecurityService = networkEdgeSecurityService - c.networkedgesecurityservice = networkedgesecurityservice - return c -} - -// Paths sets the optional parameter "paths": -func (c *NetworkEdgeSecurityServicesPatchCall) Paths(paths ...string) *NetworkEdgeSecurityServicesPatchCall { - c.urlParams_.SetMulti("paths", append([]string{}, paths...)) + c.networkattachment = networkattachment return c } @@ -109371,23 +113927,17 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Paths(paths ...string) *NetworkEd // received, and if so, will ignore the second request. This prevents // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *NetworkEdgeSecurityServicesPatchCall) RequestId(requestId string) *NetworkEdgeSecurityServicesPatchCall { +// supported ( 00000000-0000-0000-0000-000000000000). end_interface: +// MixerMutationRequestBuilder +func (c *NetworkAttachmentsInsertCall) RequestId(requestId string) *NetworkAttachmentsInsertCall { c.urlParams_.Set("requestId", requestId) return c } -// UpdateMask sets the optional parameter "updateMask": Indicates fields -// to be updated as part of this request. -func (c *NetworkEdgeSecurityServicesPatchCall) UpdateMask(updateMask string) *NetworkEdgeSecurityServicesPatchCall { - c.urlParams_.Set("updateMask", updateMask) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworkEdgeSecurityServicesPatchCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesPatchCall { +func (c *NetworkAttachmentsInsertCall) Fields(s ...googleapi.Field) *NetworkAttachmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -109395,21 +113945,21 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Fields(s ...googleapi.Field) *Net // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworkEdgeSecurityServicesPatchCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesPatchCall { +func (c *NetworkAttachmentsInsertCall) Context(ctx context.Context) *NetworkAttachmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworkEdgeSecurityServicesPatchCall) Header() http.Header { +func (c *NetworkAttachmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworkEdgeSecurityServicesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -109417,53 +113967,52 @@ func (c *NetworkEdgeSecurityServicesPatchCall) doRequest(alt string) (*http.Resp } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkattachment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "networkEdgeSecurityService": c.networkEdgeSecurityService, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networkEdgeSecurityServices.patch" call. 
+// Do executes the "compute.networkAttachments.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -109477,28 +114026,15 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Patches the specified policy with the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", - // "httpMethod": "PATCH", - // "id": "compute.networkEdgeSecurityServices.patch", + // "description": "Creates a NetworkAttachment in the specified project in the given scope using the parameters that are included in the request.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments", + // "httpMethod": "POST", + // "id": "compute.networkAttachments.insert", // "parameterOrder": [ // "project", - // "region", - // "networkEdgeSecurityService" + // "region" // ], // "parameters": { - // "networkEdgeSecurityService": { - // "description": "Name of the network edge security service to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, - // "paths": { - // "location": "query", - // "repeated": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -109507,27 +114043,21 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region of this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "updateMask": { - // "description": "Indicates fields to be updated as part of this request.", - // "format": "google-fieldmask", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", // "location": "query", // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "path": "projects/{project}/regions/{region}/networkAttachments", // "request": { - // "$ref": "NetworkEdgeSecurityService" + // "$ref": "NetworkAttachment" // }, // "response": { // "$ref": "Operation" @@ -109540,23 +114070,1706 @@ func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) } -// method id "compute.networkEndpointGroups.aggregatedList": +// method id "compute.networkAttachments.list": -type NetworkEndpointGroupsAggregatedListCall struct { +type NetworkAttachmentsListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves the list of network endpoint groups and -// sorts them by zone. +// List: Lists the NetworkAttachments for a project in the given scope. // // - project: Project ID for this request. -func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { - c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region of this request. +func (r *NetworkAttachmentsService) List(project string, region string) *NetworkAttachmentsListCall { + c := &NetworkAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. 
For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NetworkAttachmentsListCall) Filter(filter string) *NetworkAttachmentsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworkAttachmentsListCall) MaxResults(maxResults int64) *NetworkAttachmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *NetworkAttachmentsListCall) OrderBy(orderBy string) *NetworkAttachmentsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NetworkAttachmentsListCall) PageToken(pageToken string) *NetworkAttachmentsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. 
+func (c *NetworkAttachmentsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkAttachmentsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkAttachmentsListCall) Fields(s ...googleapi.Field) *NetworkAttachmentsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkAttachmentsListCall) IfNoneMatch(entityTag string) *NetworkAttachmentsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkAttachmentsListCall) Context(ctx context.Context) *NetworkAttachmentsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkAttachmentsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkAttachmentsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkAttachments.list" call. +// Exactly one of *NetworkAttachmentList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NetworkAttachmentList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkAttachmentList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &NetworkAttachmentList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the NetworkAttachments for a project in the given scope.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments", + // "httpMethod": "GET", + // "id": "compute.networkAttachments.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region of this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkAttachments", + // "response": { + // "$ref": "NetworkAttachmentList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworkAttachmentsListCall) Pages(ctx context.Context, f func(*NetworkAttachmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.networkAttachments.setIamPolicy": + +type NetworkAttachmentsSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. 
+func (r *NetworkAttachmentsService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *NetworkAttachmentsSetIamPolicyCall { + c := &NetworkAttachmentsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *NetworkAttachmentsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkAttachmentsSetIamPolicyCall) Context(ctx context.Context) *NetworkAttachmentsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkAttachmentsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkAttachments.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NetworkAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", + // "httpMethod": "POST", + // "id": "compute.networkAttachments.setIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", + // "request": { + // "$ref": "RegionSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networkAttachments.testIamPermissions": + +type NetworkAttachmentsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *NetworkAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkAttachmentsTestIamPermissionsCall { + c := &NetworkAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *NetworkAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkAttachmentsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *NetworkAttachmentsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkAttachmentsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkAttachments.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.networkAttachments.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkAttachments/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.networkEdgeSecurityServices.aggregatedList": + +type NetworkEdgeSecurityServicesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all NetworkEdgeSecurityService +// resources available to the specified project. +// +// - project: Name of the project scoping this request. +func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *NetworkEdgeSecurityServicesAggregatedListCall { + c := &NetworkEdgeSecurityServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. 
For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Filter(filter string) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *NetworkEdgeSecurityServicesAggregatedListCall) MaxResults(maxResults int64) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. 
By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) OrderBy(orderBy string) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) PageToken(pageToken string) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
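+//
+// A minimal, illustrative sketch only (svc is an already-constructed *Service,
+// ctx a context.Context, and the header and project values are placeholders,
+// not part of this change):
+//
+//	call := svc.NetworkEdgeSecurityServices.AggregatedList("my-project")
+//	call.Header().Set("X-Example-Header", "audit")
+//	list, err := call.Context(ctx).Do()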
+func (c *NetworkEdgeSecurityServicesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEdgeSecurityServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/networkEdgeSecurityServices") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEdgeSecurityServices.aggregatedList" call. +// Exactly one of *NetworkEdgeSecurityServiceAggregatedList or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *NetworkEdgeSecurityServiceAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityServiceAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &NetworkEdgeSecurityServiceAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all NetworkEdgeSecurityService resources available to the specified project.", + // "flatPath": "projects/{project}/aggregated/networkEdgeSecurityServices", + // "httpMethod": "GET", + // "id": "compute.networkEdgeSecurityServices.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/networkEdgeSecurityServices", + // "response": { + // "$ref": "NetworkEdgeSecurityServiceAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworkEdgeSecurityServicesAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEdgeSecurityServiceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.networkEdgeSecurityServices.delete": + +type NetworkEdgeSecurityServicesDeleteCall struct { + s *Service + project string + region string + networkEdgeSecurityService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified service. +// +// - networkEdgeSecurityService: Name of the network edge security +// service to delete. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *NetworkEdgeSecurityServicesService) Delete(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesDeleteCall { + c := &NetworkEdgeSecurityServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.networkEdgeSecurityService = networkEdgeSecurityService + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). 
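+//
+// Illustrative sketch only (the UUID and resource names below are placeholders,
+// and svc is an already-constructed *Service):
+//
+//	reqID := "f3b0c2d1-8a4e-4a5b-9c6d-0a1b2c3d4e5f"
+//	op, err := svc.NetworkEdgeSecurityServices.
+//		Delete("my-project", "us-central1", "my-service").
+//		RequestId(reqID).Context(ctx).Do()
+//	// Retrying with the same reqID will not delete the resource twice.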
+func (c *NetworkEdgeSecurityServicesDeleteCall) RequestId(requestId string) *NetworkEdgeSecurityServicesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEdgeSecurityServicesDeleteCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEdgeSecurityServicesDeleteCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkEdgeSecurityServicesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEdgeSecurityServicesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "networkEdgeSecurityService": c.networkEdgeSecurityService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEdgeSecurityServices.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEdgeSecurityServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified service.", + // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "httpMethod": "DELETE", + // "id": "compute.networkEdgeSecurityServices.delete", + // "parameterOrder": [ + // "project", + // "region", + // "networkEdgeSecurityService" + // ], + // "parameters": { + // "networkEdgeSecurityService": { + // "description": "Name of the network edge security service to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networkEdgeSecurityServices.get": + +type NetworkEdgeSecurityServicesGetCall struct { + s *Service + project string + region string + networkEdgeSecurityService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets a specified NetworkEdgeSecurityService. +// +// - networkEdgeSecurityService: Name of the network edge security +// service to get. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
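+//
+// A minimal consumer-side sketch (svc, ctx and the identifiers below are
+// assumptions, not part of the generated surface):
+//
+//	nes, err := svc.NetworkEdgeSecurityServices.
+//		Get("my-project", "us-central1", "my-service").
+//		Context(ctx).Do()
+//	if googleapi.IsNotModified(err) {
+//		// Only relevant when IfNoneMatch was set on the call.
+//	}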
+func (r *NetworkEdgeSecurityServicesService) Get(project string, region string, networkEdgeSecurityService string) *NetworkEdgeSecurityServicesGetCall { + c := &NetworkEdgeSecurityServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.networkEdgeSecurityService = networkEdgeSecurityService + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEdgeSecurityServicesGetCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworkEdgeSecurityServicesGetCall) IfNoneMatch(entityTag string) *NetworkEdgeSecurityServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEdgeSecurityServicesGetCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkEdgeSecurityServicesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEdgeSecurityServicesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "networkEdgeSecurityService": c.networkEdgeSecurityService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEdgeSecurityServices.get" call. +// Exactly one of *NetworkEdgeSecurityService or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NetworkEdgeSecurityService.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkEdgeSecurityServicesGetCall) Do(opts ...googleapi.CallOption) (*NetworkEdgeSecurityService, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &NetworkEdgeSecurityService{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a specified NetworkEdgeSecurityService.", + // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "httpMethod": "GET", + // "id": "compute.networkEdgeSecurityServices.get", + // "parameterOrder": [ + // "project", + // "region", + // "networkEdgeSecurityService" + // ], + // "parameters": { + // "networkEdgeSecurityService": { + // "description": "Name of the network edge security service to get.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "response": { + // "$ref": "NetworkEdgeSecurityService" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.networkEdgeSecurityServices.insert": + +type NetworkEdgeSecurityServicesInsertCall struct { + s *Service + project string + region string + networkedgesecurityservice *NetworkEdgeSecurityService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new service in the specified project using the data +// included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *NetworkEdgeSecurityServicesService) Insert(project string, region string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesInsertCall { + c := &NetworkEdgeSecurityServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.networkedgesecurityservice = networkedgesecurityservice + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. 
If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworkEdgeSecurityServicesInsertCall) RequestId(requestId string) *NetworkEdgeSecurityServicesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *NetworkEdgeSecurityServicesInsertCall) ValidateOnly(validateOnly bool) *NetworkEdgeSecurityServicesInsertCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEdgeSecurityServicesInsertCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEdgeSecurityServicesInsertCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkEdgeSecurityServicesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEdgeSecurityServicesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEdgeSecurityServices.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEdgeSecurityServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new service in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + // "httpMethod": "POST", + // "id": "compute.networkEdgeSecurityServices.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices", + // "request": { + // "$ref": "NetworkEdgeSecurityService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networkEdgeSecurityServices.patch": + +type NetworkEdgeSecurityServicesPatchCall struct { + s *Service + project string + region string + networkEdgeSecurityService string + networkedgesecurityservice *NetworkEdgeSecurityService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified policy with the data included in the +// request. +// +// - networkEdgeSecurityService: Name of the network edge security +// service to update. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
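+//
+// A rough partial-update sketch (assumes the resource exposes a Description
+// field; all values are placeholders chosen for illustration):
+//
+//	patch := &NetworkEdgeSecurityService{Description: "updated by automation"}
+//	op, err := svc.NetworkEdgeSecurityServices.
+//		Patch("my-project", "us-central1", "my-service", patch).
+//		UpdateMask("description").Context(ctx).Do()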
+func (r *NetworkEdgeSecurityServicesService) Patch(project string, region string, networkEdgeSecurityService string, networkedgesecurityservice *NetworkEdgeSecurityService) *NetworkEdgeSecurityServicesPatchCall { + c := &NetworkEdgeSecurityServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.networkEdgeSecurityService = networkEdgeSecurityService + c.networkedgesecurityservice = networkedgesecurityservice + return c +} + +// Paths sets the optional parameter "paths": +func (c *NetworkEdgeSecurityServicesPatchCall) Paths(paths ...string) *NetworkEdgeSecurityServicesPatchCall { + c.urlParams_.SetMulti("paths", append([]string{}, paths...)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NetworkEdgeSecurityServicesPatchCall) RequestId(requestId string) *NetworkEdgeSecurityServicesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// UpdateMask sets the optional parameter "updateMask": Indicates fields +// to be updated as part of this request. +func (c *NetworkEdgeSecurityServicesPatchCall) UpdateMask(updateMask string) *NetworkEdgeSecurityServicesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEdgeSecurityServicesPatchCall) Fields(s ...googleapi.Field) *NetworkEdgeSecurityServicesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEdgeSecurityServicesPatchCall) Context(ctx context.Context) *NetworkEdgeSecurityServicesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *NetworkEdgeSecurityServicesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEdgeSecurityServicesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkedgesecurityservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "networkEdgeSecurityService": c.networkEdgeSecurityService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEdgeSecurityServices.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEdgeSecurityServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified policy with the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "httpMethod": "PATCH", + // "id": "compute.networkEdgeSecurityServices.patch", + // "parameterOrder": [ + // "project", + // "region", + // "networkEdgeSecurityService" + // ], + // "parameters": { + // "networkEdgeSecurityService": { + // "description": "Name of the network edge security service to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "paths": { + // "location": "query", + // "repeated": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "updateMask": { + // "description": "Indicates fields to be updated as part of this request.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkEdgeSecurityServices/{networkEdgeSecurityService}", + // "request": { + // "$ref": "NetworkEdgeSecurityService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networkEndpointGroups.aggregatedList": + +type NetworkEndpointGroupsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of network endpoint groups and +// sorts them by zone. +// +// - project: Project ID for this request. +func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { + c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -109732,17 +115945,17 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -109855,12 +116068,12 @@ type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { // AttachNetworkEndpoints: Attach a list of network endpoints to the // specified network endpoint group. // -// - networkEndpointGroup: The name of the network endpoint group where -// you are attaching network endpoints to. It should comply with -// RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. +// - networkEndpointGroup: The name of the network endpoint group where +// you are attaching network endpoints to. It should comply with +// RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. 
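+//
+// A hedged usage sketch (instance name, IP and port are placeholders):
+//
+//	req := &NetworkEndpointGroupsAttachEndpointsRequest{
+//		NetworkEndpoints: []*NetworkEndpoint{
+//			{Instance: "my-instance", IpAddress: "10.0.0.4", Port: 8080},
+//		},
+//	}
+//	op, err := svc.NetworkEndpointGroups.
+//		AttachNetworkEndpoints("my-project", "us-central1-a", "my-neg", req).
+//		Context(ctx).Do()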
func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -109955,17 +116168,17 @@ func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.C if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -110046,11 +116259,11 @@ type NetworkEndpointGroupsDeleteCall struct { // terminated when the NEG is deleted. Note that the NEG cannot be // deleted if there are backend services referencing it. // -// - networkEndpointGroup: The name of the network endpoint group to -// delete. It should comply with RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. +// - networkEndpointGroup: The name of the network endpoint group to +// delete. It should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110139,17 +116352,17 @@ func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -110226,11 +116439,11 @@ type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { // DetachNetworkEndpoints: Detach a list of network endpoints from the // specified network endpoint group. // -// - networkEndpointGroup: The name of the network endpoint group where -// you are removing network endpoints. It should comply with RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. +// - networkEndpointGroup: The name of the network endpoint group where +// you are removing network endpoints. It should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. 
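+//
+// Sketch only; detaching mirrors the attach call (endpoint values are placeholders):
+//
+//	req := &NetworkEndpointGroupsDetachEndpointsRequest{
+//		NetworkEndpoints: []*NetworkEndpoint{{Instance: "my-instance", IpAddress: "10.0.0.4", Port: 8080}},
+//	}
+//	op, err := svc.NetworkEndpointGroups.
+//		DetachNetworkEndpoints("my-project", "us-central1-a", "my-neg", req).Do()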
func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110325,17 +116538,17 @@ func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.C if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -110412,14 +116625,13 @@ type NetworkEndpointGroupsGetCall struct { header_ http.Header } -// Get: Returns the specified network endpoint group. Gets a list of -// available network endpoint groups by making a list() request. +// Get: Returns the specified network endpoint group. // -// - networkEndpointGroup: The name of the network endpoint group. It -// should comply with RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. +// - networkEndpointGroup: The name of the network endpoint group. It +// should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110505,17 +116717,17 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ @@ -110529,7 +116741,7 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ } return ret, nil // { - // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + // "description": "Returns the specified network endpoint group.", // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", // "httpMethod": "GET", // "id": "compute.networkEndpointGroups.get", @@ -110587,9 +116799,9 @@ type NetworkEndpointGroupsInsertCall struct { // Insert: Creates a network endpoint group in the specified project // using the parameters that are included in the request. // -// - project: Project ID for this request. -// - zone: The name of the zone where you want to create the network -// endpoint group. It should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where you want to create the network +// endpoint group. It should comply with RFC1035. 
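+//
+// Illustrative only (a minimal zonal NEG; every value is a placeholder):
+//
+//	neg := &NetworkEndpointGroup{
+//		Name:                "my-neg",
+//		NetworkEndpointType: "GCE_VM_IP_PORT",
+//		Network:             "global/networks/default",
+//		DefaultPort:         8080,
+//	}
+//	op, err := svc.NetworkEndpointGroups.Insert("my-project", "us-central1-a", neg).Context(ctx).Do()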
func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110682,17 +116894,17 @@ func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -110764,9 +116976,9 @@ type NetworkEndpointGroupsListCall struct { // List: Retrieves the list of network endpoint groups that are located // in the specified project and zone. // -// - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -110932,17 +117144,17 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ @@ -111057,12 +117269,12 @@ type NetworkEndpointGroupsListNetworkEndpointsCall struct { // ListNetworkEndpoints: Lists the network endpoints in the specified // network endpoint group. // -// - networkEndpointGroup: The name of the network endpoint group from -// which you want to generate a list of included network endpoints. It -// should comply with RFC1035. -// - project: Project ID for this request. -// - zone: The name of the zone where the network endpoint group is -// located. It should comply with RFC1035. +// - networkEndpointGroup: The name of the network endpoint group from +// which you want to generate a list of included network endpoints. It +// should comply with RFC1035. +// - project: Project ID for this request. +// - zone: The name of the zone where the network endpoint group is +// located. It should comply with RFC1035. 
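+//
+// A paging sketch (svc and ctx come from the caller; the Items and
+// NetworkEndpoint field names follow the generated list types):
+//
+//	call := svc.NetworkEndpointGroups.ListNetworkEndpoints("my-project", "us-central1-a", "my-neg",
+//		&NetworkEndpointGroupsListEndpointsRequest{})
+//	err := call.Pages(ctx, func(page *NetworkEndpointGroupsListNetworkEndpoints) error {
+//		for _, ep := range page.Items {
+//			_ = ep.NetworkEndpoint.IpAddress // process each endpoint
+//		}
+//		return nil
+//	})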
func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -111225,17 +117437,17 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroupsListNetworkEndpoints{ ServerResponse: googleapi.ServerResponse{ @@ -111441,17 +117653,17 @@ func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallO if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -111632,17 +117844,17 @@ func (c *NetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -111831,17 +118043,17 @@ func (c *NetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -112019,17 +118231,17 @@ func (c *NetworkFirewallPoliciesCloneRulesCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -112190,17 +118402,17 @@ func (c *NetworkFirewallPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: 
googleapi.ServerResponse{ @@ -112354,17 +118566,17 @@ func (c *NetworkFirewallPoliciesGetCall) Do(opts ...googleapi.CallOption) (*Fire if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -112429,9 +118641,9 @@ type NetworkFirewallPoliciesGetAssociationCall struct { // GetAssociation: Gets an association with the specified name. // -// - firewallPolicy: Name of the firewall policy to which the queried -// association belongs. -// - project: Project ID for this request. +// - firewallPolicy: Name of the firewall policy to which the queried +// association belongs. +// - project: Project ID for this request. func (r *NetworkFirewallPoliciesService) GetAssociation(project string, firewallPolicy string) *NetworkFirewallPoliciesGetAssociationCall { c := &NetworkFirewallPoliciesGetAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -112522,17 +118734,17 @@ func (c *NetworkFirewallPoliciesGetAssociationCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyAssociation{ ServerResponse: googleapi.ServerResponse{ @@ -112695,17 +118907,17 @@ func (c *NetworkFirewallPoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -112776,9 +118988,9 @@ type NetworkFirewallPoliciesGetRuleCall struct { // GetRule: Gets a rule of the specified priority. // -// - firewallPolicy: Name of the firewall policy to which the queried -// rule belongs. -// - project: Project ID for this request. +// - firewallPolicy: Name of the firewall policy to which the queried +// rule belongs. +// - project: Project ID for this request. 
func (r *NetworkFirewallPoliciesService) GetRule(project string, firewallPolicy string) *NetworkFirewallPoliciesGetRuleCall { c := &NetworkFirewallPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -112869,17 +119081,17 @@ func (c *NetworkFirewallPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyRule{ ServerResponse: googleapi.ServerResponse{ @@ -113041,17 +119253,17 @@ func (c *NetworkFirewallPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -113279,17 +119491,17 @@ func (c *NetworkFirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*Fir if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyList{ ServerResponse: googleapi.ServerResponse{ @@ -113490,17 +119702,17 @@ func (c *NetworkFirewallPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -113673,17 +119885,17 @@ func (c *NetworkFirewallPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -113856,17 +120068,17 @@ func (c *NetworkFirewallPoliciesRemoveAssociationCall) Do(opts ...googleapi.Call if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -114034,17 +120246,17 @@ func (c *NetworkFirewallPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() 
} - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -114198,17 +120410,17 @@ func (c *NetworkFirewallPoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -114354,17 +120566,17 @@ func (c *NetworkFirewallPoliciesTestIamPermissionsCall) Do(opts ...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -114526,17 +120738,17 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -114695,17 +120907,17 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -114772,8 +120984,7 @@ type NetworksGetCall struct { header_ http.Header } -// Get: Returns the specified network. Gets a list of available networks -// by making a list() request. +// Get: Returns the specified network. // // - network: Name of the network to return. // - project: Project ID for this request. @@ -114860,17 +121071,17 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Network{ ServerResponse: googleapi.ServerResponse{ @@ -114884,7 +121095,7 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { } return ret, nil // { - // "description": "Returns the specified network. 
Gets a list of available networks by making a list() request.", + // "description": "Returns the specified network.", // "flatPath": "projects/{project}/global/networks/{network}", // "httpMethod": "GET", // "id": "compute.networks.get", @@ -115022,17 +121233,17 @@ func (c *NetworksGetEffectiveFirewallsCall) Do(opts ...googleapi.CallOption) (*N if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworksGetEffectiveFirewallsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -115188,17 +121399,17 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -115426,17 +121637,17 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkList{ ServerResponse: googleapi.ServerResponse{ @@ -115556,8 +121767,9 @@ func (r *NetworksService) ListPeeringRoutes(project string, network string) *Net // the exchanged routes. // // Possible values: -// "INCOMING" - For routes exported from peer network. -// "OUTGOING" - For routes exported from local network. +// +// "INCOMING" - For routes exported from peer network. +// "OUTGOING" - For routes exported from local network. 
func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksListPeeringRoutesCall { c.urlParams_.Set("direction", direction) return c @@ -115736,17 +121948,17 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ExchangedPeeringRoutesList{ ServerResponse: googleapi.ServerResponse{ @@ -115979,17 +122191,17 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -116155,17 +122367,17 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -116325,17 +122537,17 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -116407,9 +122619,9 @@ type NetworksUpdatePeeringCall struct { // NetworkPeering.export_custom_routes field and the // NetworkPeering.import_custom_routes field. // -// - network: Name of the network resource which the updated peering is -// belonging to. -// - project: Project ID for this request. +// - network: Name of the network resource which the updated peering is +// belonging to. +// - project: Project ID for this request. 
func (r *NetworksService) UpdatePeering(project string, network string, networksupdatepeeringrequest *NetworksUpdatePeeringRequest) *NetworksUpdatePeeringCall { c := &NetworksUpdatePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -116502,17 +122714,17 @@ func (c *NetworksUpdatePeeringCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -116682,17 +122894,17 @@ func (c *NodeGroupsAddNodesCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -116949,17 +123161,17 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -117161,17 +123373,17 @@ func (c *NodeGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -117249,10 +123461,10 @@ type NodeGroupsDeleteNodesCall struct { // DeleteNodes: Deletes specified nodes from the node group. // -// - nodeGroup: Name of the NodeGroup resource whose nodes will be -// deleted. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - nodeGroup: Name of the NodeGroup resource whose nodes will be +// deleted. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
func (r *NodeGroupsService) DeleteNodes(project string, zone string, nodeGroup string, nodegroupsdeletenodesrequest *NodeGroupsDeleteNodesRequest) *NodeGroupsDeleteNodesCall { c := &NodeGroupsDeleteNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -117347,17 +123559,17 @@ func (c *NodeGroupsDeleteNodesCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -117528,17 +123740,17 @@ func (c *NodeGroupsGetCall) Do(opts ...googleapi.CallOption) (*NodeGroup, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeGroup{ ServerResponse: googleapi.ServerResponse{ @@ -117708,17 +123920,17 @@ func (c *NodeGroupsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -117894,17 +124106,17 @@ func (c *NodeGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -118153,17 +124365,17 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeGroupList{ ServerResponse: googleapi.ServerResponse{ @@ -118277,10 +124489,10 @@ type NodeGroupsListNodesCall struct { // ListNodes: Lists nodes in the node group. // -// - nodeGroup: Name of the NodeGroup resource whose nodes you want to -// list. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. +// - nodeGroup: Name of the NodeGroup resource whose nodes you want to +// list. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup string) *NodeGroupsListNodesCall { c := &NodeGroupsListNodesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -118435,17 +124647,17 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeGroupsListNodes{ ServerResponse: googleapi.ServerResponse{ @@ -118665,17 +124877,17 @@ func (c *NodeGroupsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -118838,17 +125050,17 @@ func (c *NodeGroupsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -119021,17 +125233,17 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -119194,17 +125406,17 @@ func (c *NodeGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Te if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -119456,17 +125668,17 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeTemplateAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -119668,17 +125880,17 @@ func (c *NodeTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - 
return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -119754,8 +125966,7 @@ type NodeTemplatesGetCall struct { header_ http.Header } -// Get: Returns the specified node template. Gets a list of available -// node templates by making a list() request. +// Get: Returns the specified node template. // // - nodeTemplate: Name of the node template to return. // - project: Project ID for this request. @@ -119845,17 +126056,17 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeTemplate{ ServerResponse: googleapi.ServerResponse{ @@ -119869,7 +126080,7 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, } return ret, nil // { - // "description": "Returns the specified node template. Gets a list of available node templates by making a list() request.", + // "description": "Returns the specified node template.", // "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", // "httpMethod": "GET", // "id": "compute.nodeTemplates.get", @@ -120025,17 +126236,17 @@ func (c *NodeTemplatesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -120209,17 +126420,17 @@ func (c *NodeTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -120459,17 +126670,17 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeTemplateList{ ServerResponse: googleapi.ServerResponse{ @@ -120666,17 +126877,17 @@ func (c *NodeTemplatesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Polic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, 
Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -120834,17 +127045,17 @@ func (c *NodeTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -121096,17 +127307,17 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -121216,8 +127427,7 @@ type NodeTypesGetCall struct { header_ http.Header } -// Get: Returns the specified node type. Gets a list of available node -// types by making a list() request. +// Get: Returns the specified node type. // // - nodeType: Name of the node type to return. // - project: Project ID for this request. @@ -121307,17 +127517,17 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeType{ ServerResponse: googleapi.ServerResponse{ @@ -121331,7 +127541,7 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { } return ret, nil // { - // "description": "Returns the specified node type. 
Gets a list of available node types by making a list() request.", + // "description": "Returns the specified node type.", // "flatPath": "projects/{project}/zones/{zone}/nodeTypes/{nodeType}", // "httpMethod": "GET", // "id": "compute.nodeTypes.get", @@ -121558,17 +127768,17 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NodeTypeList{ ServerResponse: googleapi.ServerResponse{ @@ -121858,17 +128068,17 @@ func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PacketMirroringAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -122070,17 +128280,17 @@ func (c *PacketMirroringsDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -122246,17 +128456,17 @@ func (c *PacketMirroringsGetCall) Do(opts ...googleapi.CallOption) (*PacketMirro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PacketMirroring{ ServerResponse: googleapi.ServerResponse{ @@ -122424,17 +128634,17 @@ func (c *PacketMirroringsInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -122674,17 +128884,17 @@ func (c *PacketMirroringsListCall) Do(opts ...googleapi.CallOption) (*PacketMirr if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PacketMirroringList{ ServerResponse: googleapi.ServerResponse{ @@ -122898,17 +129108,17 @@ func (c *PacketMirroringsPatchCall) Do(opts ...googleapi.CallOption) (*Operation 
if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -123071,17 +129281,17 @@ func (c *PacketMirroringsTestIamPermissionsCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -123240,17 +129450,17 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -123402,17 +129612,17 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -123559,17 +129769,17 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -123722,17 +129932,17 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -123884,17 +130094,17 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Project{ 
ServerResponse: googleapi.ServerResponse{ @@ -124033,17 +130243,17 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Project{ ServerResponse: googleapi.ServerResponse{ @@ -124263,17 +130473,17 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ProjectsGetXpnResources{ ServerResponse: googleapi.ServerResponse{ @@ -124535,17 +130745,17 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &XpnHostList{ ServerResponse: googleapi.ServerResponse{ @@ -124743,17 +130953,17 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -124912,17 +131122,17 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -125077,17 +131287,17 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -125244,17 +131454,17 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer 
googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -125411,17 +131621,17 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -125487,9 +131697,9 @@ type PublicAdvertisedPrefixesDeleteCall struct { // Delete: Deletes the specified PublicAdvertisedPrefix // -// - project: Project ID for this request. -// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource -// to delete. +// - project: Project ID for this request. +// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource +// to delete. func (r *PublicAdvertisedPrefixesService) Delete(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesDeleteCall { c := &PublicAdvertisedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -125576,17 +131786,17 @@ func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -125655,9 +131865,9 @@ type PublicAdvertisedPrefixesGetCall struct { // Get: Returns the specified PublicAdvertisedPrefix resource. // -// - project: Project ID for this request. -// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource -// to return. +// - project: Project ID for this request. +// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource +// to return. 
func (r *PublicAdvertisedPrefixesService) Get(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesGetCall { c := &PublicAdvertisedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -125741,17 +131951,17 @@ func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Pub if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PublicAdvertisedPrefix{ ServerResponse: googleapi.ServerResponse{ @@ -125907,17 +132117,17 @@ func (c *PublicAdvertisedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -126144,17 +132354,17 @@ func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pu if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PublicAdvertisedPrefixList{ ServerResponse: googleapi.ServerResponse{ @@ -126262,9 +132472,9 @@ type PublicAdvertisedPrefixesPatchCall struct { // in the request. This method supports PATCH semantics and uses JSON // merge patch format and processing rules. // -// - project: Project ID for this request. -// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource -// to patch. +// - project: Project ID for this request. +// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource +// to patch. 
func (r *PublicAdvertisedPrefixesService) Patch(project string, publicAdvertisedPrefix string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesPatchCall { c := &PublicAdvertisedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -126357,17 +132567,17 @@ func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -126617,17 +132827,17 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PublicDelegatedPrefixAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -126739,10 +132949,10 @@ type PublicDelegatedPrefixesDeleteCall struct { // Delete: Deletes the specified PublicDelegatedPrefix in the given // region. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to delete. -// - region: Name of the region of this request. +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to delete. +// - region: Name of the region of this request. func (r *PublicDelegatedPrefixesService) Delete(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesDeleteCall { c := &PublicDelegatedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -126831,17 +133041,17 @@ func (c *PublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -126920,10 +133130,10 @@ type PublicDelegatedPrefixesGetCall struct { // Get: Returns the specified PublicDelegatedPrefix resource in the // given region. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to return. -// - region: Name of the region of this request. +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to return. +// - region: Name of the region of this request. 
func (r *PublicDelegatedPrefixesService) Get(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesGetCall { c := &PublicDelegatedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -127009,17 +133219,17 @@ func (c *PublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Publ if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PublicDelegatedPrefix{ ServerResponse: googleapi.ServerResponse{ @@ -127188,17 +133398,17 @@ func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -127438,17 +133648,17 @@ func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pub if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PublicDelegatedPrefixList{ ServerResponse: googleapi.ServerResponse{ @@ -127565,10 +133775,10 @@ type PublicDelegatedPrefixesPatchCall struct { // data included in the request. This method supports PATCH semantics // and uses JSON merge patch format and processing rules. // -// - project: Project ID for this request. -// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource -// to patch. -// - region: Name of the region for this request. +// - project: Project ID for this request. +// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource +// to patch. +// - region: Name of the region for this request. 
func (r *PublicDelegatedPrefixesService) Patch(project string, region string, publicDelegatedPrefix string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesPatchCall { c := &PublicDelegatedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -127663,17 +133873,17 @@ func (c *PublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -127844,17 +134054,17 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -128020,17 +134230,17 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Autoscaler{ ServerResponse: googleapi.ServerResponse{ @@ -128198,17 +134408,17 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -128448,17 +134658,17 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionAutoscalerList{ ServerResponse: googleapi.ServerResponse{ @@ -128675,17 +134885,17 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -128865,17 +135075,17 @@ func (c *RegionAutoscalersUpdateCall) Do(opts 
...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -129044,17 +135254,17 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -129220,17 +135430,17 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendService{ ServerResponse: googleapi.ServerResponse{ @@ -129305,10 +135515,10 @@ type RegionBackendServicesGetHealthCall struct { // GetHealth: Gets the most recent health check results for this // regional BackendService. // -// - backendService: Name of the BackendService resource for which to -// get health. -// - project: . -// - region: Name of the region scoping this request. +// - backendService: Name of the BackendService resource for which to +// get health. +// - project: . +// - region: Name of the region scoping this request. func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -129387,17 +135597,17 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BackendServiceGroupHealth{ ServerResponse: googleapi.ServerResponse{ @@ -129458,6 +135668,192 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* } +// method id "compute.regionBackendServices.getIamPolicy": + +type RegionBackendServicesGetIamPolicyCall struct { + s *Service + project string + region string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. 
+func (r *RegionBackendServicesService) GetIamPolicy(project string, region string, resource string) *RegionBackendServicesGetIamPolicyCall { + c := &RegionBackendServicesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. +func (c *RegionBackendServicesGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *RegionBackendServicesGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionBackendServicesGetIamPolicyCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionBackendServicesGetIamPolicyCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionBackendServicesGetIamPolicyCall) Context(ctx context.Context) *RegionBackendServicesGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionBackendServicesGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionBackendServicesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionBackendServices.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *RegionBackendServicesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.regionBackendServices.getIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/backendServices/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.regionBackendServices.insert": type RegionBackendServicesInsertCall struct { @@ -129568,17 +135964,17 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -129818,17 +136214,17 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, 
gensupport.WrapError(err) } ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ @@ -130043,17 +136439,17 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -130119,6 +136515,174 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } +// method id "compute.regionBackendServices.setIamPolicy": + +type RegionBackendServicesSetIamPolicyCall struct { + s *Service + project string + region string + resource string + regionsetpolicyrequest *RegionSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +// - resource: Name or id of the resource for this request. +func (r *RegionBackendServicesService) SetIamPolicy(project string, region string, resource string, regionsetpolicyrequest *RegionSetPolicyRequest) *RegionBackendServicesSetIamPolicyCall { + c := &RegionBackendServicesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetpolicyrequest = regionsetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionBackendServicesSetIamPolicyCall) Fields(s ...googleapi.Field) *RegionBackendServicesSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionBackendServicesSetIamPolicyCall) Context(ctx context.Context) *RegionBackendServicesSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionBackendServicesSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionBackendServicesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionBackendServices.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionBackendServicesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", + // "httpMethod": "POST", + // "id": "compute.regionBackendServices.setIamPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/backendServices/{resource}/setIamPolicy", + // "request": { + // "$ref": "RegionSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regionBackendServices.update": type RegionBackendServicesUpdateCall struct { @@ -130233,17 +136797,17 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); 
err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -130500,17 +137064,17 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &CommitmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -130620,8 +137184,7 @@ type RegionCommitmentsGetCall struct { header_ http.Header } -// Get: Returns the specified commitment resource. Gets a list of -// available commitments by making a list() request. +// Get: Returns the specified commitment resource. // // - commitment: Name of the commitment to return. // - project: Project ID for this request. @@ -130711,17 +137274,17 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Commitment{ ServerResponse: googleapi.ServerResponse{ @@ -130735,7 +137298,7 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment } return ret, nil // { - // "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", + // "description": "Returns the specified commitment resource.", // "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", // "httpMethod": "GET", // "id": "compute.regionCommitments.get", @@ -130889,17 +137452,17 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -131139,17 +137702,17 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &CommitmentList{ ServerResponse: googleapi.ServerResponse{ @@ -131267,10 +137830,10 @@ type RegionCommitmentsUpdateCall struct { // part of update-mask. Only the following fields can be modified: // auto_renew. // -// - commitment: Name of the commitment for which auto renew is being -// updated. -// - project: Project ID for this request. -// - region: Name of the region for this request. +// - commitment: Name of the commitment for which auto renew is being +// updated. +// - project: Project ID for this request. 
+// - region: Name of the region for this request. func (r *RegionCommitmentsService) Update(project string, region string, commitment string, commitment2 *Commitment) *RegionCommitmentsUpdateCall { c := &RegionCommitmentsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -131378,17 +137941,17 @@ func (c *RegionCommitmentsUpdateCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -131478,8 +138041,7 @@ type RegionDiskTypesGetCall struct { header_ http.Header } -// Get: Returns the specified regional disk type. Gets a list of -// available disk types by making a list() request. +// Get: Returns the specified regional disk type. // // - diskType: Name of the disk type to return. // - project: Project ID for this request. @@ -131569,17 +138131,17 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DiskType{ ServerResponse: googleapi.ServerResponse{ @@ -131593,7 +138155,7 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er } return ret, nil // { - // "description": "Returns the specified regional disk type. 
Gets a list of available disk types by making a list() request.", + // "description": "Returns the specified regional disk type.", // "flatPath": "projects/{project}/regions/{region}/diskTypes/{diskType}", // "httpMethod": "GET", // "id": "compute.regionDiskTypes.get", @@ -131820,17 +138382,17 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionDiskTypeList{ ServerResponse: googleapi.ServerResponse{ @@ -132044,17 +138606,17 @@ func (c *RegionDisksAddResourcePoliciesCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -132235,17 +138797,17 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -132419,17 +138981,17 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -132594,17 +139156,17 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Disk{ ServerResponse: googleapi.ServerResponse{ @@ -132774,17 +139336,17 @@ func (c *RegionDisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -132965,17 +139527,17 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { 
res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -133220,17 +139782,17 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DiskList{ ServerResponse: googleapi.ServerResponse{ @@ -133443,17 +140005,17 @@ func (c *RegionDisksRemoveResourcePoliciesCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -133631,17 +140193,17 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -133804,17 +140366,17 @@ func (c *RegionDisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -133987,17 +140549,17 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -134160,17 +140722,17 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: 
googleapi.ServerResponse{ @@ -134232,6 +140794,221 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } +// method id "compute.regionDisks.update": + +type RegionDisksUpdateCall struct { + s *Service + project string + region string + disk string + disk2 *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Update the specified disk with the data included in the +// request. Update is performed only on selected fields included as part +// of update-mask. Only the following fields can be modified: +// user_license. +// +// - disk: The disk name for this request. +// - project: Project ID for this request. +// - region: The name of the region for this request. +func (r *RegionDisksService) Update(project string, region string, disk string, disk2 *Disk) *RegionDisksUpdateCall { + c := &RegionDisksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.disk = disk + c.disk2 = disk2 + return c +} + +// Paths sets the optional parameter "paths": +func (c *RegionDisksUpdateCall) Paths(paths ...string) *RegionDisksUpdateCall { + c.urlParams_.SetMulti("paths", append([]string{}, paths...)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionDisksUpdateCall) RequestId(requestId string) *RegionDisksUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// UpdateMask sets the optional parameter "updateMask": update_mask +// indicates fields to be updated as part of this request. +func (c *RegionDisksUpdateCall) UpdateMask(updateMask string) *RegionDisksUpdateCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionDisksUpdateCall) Fields(s ...googleapi.Field) *RegionDisksUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionDisksUpdateCall) Context(ctx context.Context) *RegionDisksUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionDisksUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionDisksUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionDisks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Update the specified disk with the data included in the request. Update is performed only on selected fields included as part of update-mask. 
Only the following fields can be modified: user_license.", + // "flatPath": "projects/{project}/regions/{region}/disks/{disk}", + // "httpMethod": "PATCH", + // "id": "compute.regionDisks.update", + // "parameterOrder": [ + // "project", + // "region", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "paths": { + // "location": "query", + // "repeated": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "updateMask": { + // "description": "update_mask indicates fields to be updated as part of this request.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/disks/{disk}", + // "request": { + // "$ref": "Disk" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regionHealthCheckServices.delete": type RegionHealthCheckServicesDeleteCall struct { @@ -134246,10 +141023,10 @@ type RegionHealthCheckServicesDeleteCall struct { // Delete: Deletes the specified regional HealthCheckService. // -// - healthCheckService: Name of the HealthCheckService to delete. The -// name must be 1-63 characters long, and comply with RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - healthCheckService: Name of the HealthCheckService to delete. The +// name must be 1-63 characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
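[Editorial sketch, not part of the vendored diff] The recurring change in these hunks is that every Do method now routes non-2xx responses and the 304 fast path through gensupport.WrapError(...) instead of returning the bare *googleapi.Error. WrapError keeps the *googleapi.Error reachable via errors.As (and, where possible, also attaches a gax apierror.APIError), so existing status-code checks should keep working. A minimal caller-side sketch under that assumption, reusing the svc from the sketch above and placeholder names; it additionally imports "errors", "log" and "google.golang.org/api/googleapi":

    func describeDiskError(ctx context.Context, svc *compute.Service) {
        _, err := svc.RegionDisks.Get("my-project", "us-east1", "missing-disk").Context(ctx).Do()
        if err == nil {
            return
        }
        var gerr *googleapi.Error
        if errors.As(err, &gerr) {
            // The HTTP status code and headers are still available on googleapi.Error
            // even though the error was routed through gensupport.WrapError.
            log.Printf("compute API error: code=%d message=%q", gerr.Code, gerr.Message)
            return
        }
        log.Printf("transport error: %v", err)
    }
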
func (r *RegionHealthCheckServicesService) Delete(project string, region string, healthCheckService string) *RegionHealthCheckServicesDeleteCall { c := &RegionHealthCheckServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -134338,17 +141115,17 @@ func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -134425,10 +141202,10 @@ type RegionHealthCheckServicesGetCall struct { // Get: Returns the specified regional HealthCheckService resource. // -// - healthCheckService: Name of the HealthCheckService to update. The -// name must be 1-63 characters long, and comply with RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - healthCheckService: Name of the HealthCheckService to update. The +// name must be 1-63 characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionHealthCheckServicesService) Get(project string, region string, healthCheckService string) *RegionHealthCheckServicesGetCall { c := &RegionHealthCheckServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -134514,17 +141291,17 @@ func (c *RegionHealthCheckServicesGetCall) Do(opts ...googleapi.CallOption) (*He if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HealthCheckService{ ServerResponse: googleapi.ServerResponse{ @@ -134691,17 +141468,17 @@ func (c *RegionHealthCheckServicesInsertCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -134941,17 +141718,17 @@ func (c *RegionHealthCheckServicesListCall) Do(opts ...googleapi.CallOption) (*H if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HealthCheckServicesList{ ServerResponse: googleapi.ServerResponse{ @@ -135068,10 +141845,10 @@ type RegionHealthCheckServicesPatchCall struct { // with the data included in the request. This method supports PATCH // semantics and uses the JSON merge patch format and processing rules. // -// - healthCheckService: Name of the HealthCheckService to update. 
The -// name must be 1-63 characters long, and comply with RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - healthCheckService: Name of the HealthCheckService to update. The +// name must be 1-63 characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionHealthCheckServicesService) Patch(project string, region string, healthCheckService string, healthcheckservice *HealthCheckService) *RegionHealthCheckServicesPatchCall { c := &RegionHealthCheckServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -135166,17 +141943,17 @@ func (c *RegionHealthCheckServicesPatchCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -135346,17 +142123,17 @@ func (c *RegionHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -135432,8 +142209,7 @@ type RegionHealthChecksGetCall struct { header_ http.Header } -// Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. +// Get: Returns the specified HealthCheck resource. // // - healthCheck: Name of the HealthCheck resource to return. // - project: Project ID for this request. @@ -135523,17 +142299,17 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ @@ -135547,7 +142323,7 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. 
Gets a list of available health checks by making a list() request.", + // "description": "Returns the specified HealthCheck resource.", // "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", // "httpMethod": "GET", // "id": "compute.regionHealthChecks.get", @@ -135701,17 +142477,17 @@ func (c *RegionHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -135951,17 +142727,17 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ @@ -136175,17 +142951,17 @@ func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -136364,17 +143140,17 @@ func (c *RegionHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -136565,17 +143341,17 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -136655,11 +143431,11 @@ type RegionInstanceGroupManagersApplyUpdatesToInstancesCall struct { // ApplyUpdatesToInstances: Apply updates to selected instances the // managed instance group. // -// - instanceGroupManager: The name of the managed instance group, -// should conform to RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. +// - instanceGroupManager: The name of the managed instance group, +// should conform to RFC1035. +// - project: Project ID for this request. 
+// - region: Name of the region scoping this request, should conform to +// RFC1035. func (r *RegionInstanceGroupManagersService) ApplyUpdatesToInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { c := &RegionInstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -136738,17 +143514,17 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -136828,11 +143604,11 @@ type RegionInstanceGroupManagersCreateInstancesCall struct { // the status of the creating or actions with the listmanagedinstances // method. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - region: The name of the region where the managed instance group is -// located. It should conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the managed instance group is +// located. It should conform to RFC1035. func (r *RegionInstanceGroupManagersService) CreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagerscreateinstancesrequest *RegionInstanceGroupManagersCreateInstancesRequest) *RegionInstanceGroupManagersCreateInstancesCall { c := &RegionInstanceGroupManagersCreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -136926,17 +143702,17 @@ func (c *RegionInstanceGroupManagersCreateInstancesCall) Do(opts ...googleapi.Ca if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -137106,17 +143882,17 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -137301,17 +144077,17 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := 
googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -137391,11 +144167,11 @@ type RegionInstanceGroupManagersDeletePerInstanceConfigsCall struct { // DeletePerInstanceConfigs: Deletes selected per-instance // configurations for the managed instance group. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. func (r *RegionInstanceGroupManagersService) DeletePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { c := &RegionInstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -137474,17 +144250,17 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...goo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -137647,17 +144423,17 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ @@ -137829,17 +144605,17 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -138078,17 +144854,17 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionInstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ @@ -138204,13 +144980,13 @@ type RegionInstanceGroupManagersListErrorsCall struct { // given regional managed instance group. 
The filter and orderBy query // parameters are not supported. // -// - instanceGroupManager: The name of the managed instance group. It -// must be a string that meets the requirements in RFC1035, or an -// unsigned long integer: must match regexp pattern: (?:a-z -// (?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. This should -// conform to RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// must be a string that meets the requirements in RFC1035, or an +// unsigned long integer: must match regexp pattern: (?:a-z +// (?:[-a-z0-9]{0,61}[a-z0-9])?)|1-9{0,19}. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. This should +// conform to RFC1035. func (r *RegionInstanceGroupManagersService) ListErrors(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListErrorsCall { c := &RegionInstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -138380,17 +145156,17 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionInstanceGroupManagersListErrorsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -138675,17 +145451,17 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionInstanceGroupManagersListInstancesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -138807,11 +145583,11 @@ type RegionInstanceGroupManagersListPerInstanceConfigsCall struct { // defined for the managed instance group. The orderBy query parameter // is not supported. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. 
func (r *RegionInstanceGroupManagersService) ListPerInstanceConfigs(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c := &RegionInstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -138968,17 +145744,17 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionInstanceGroupManagersListInstanceConfigsResp{ ServerResponse: googleapi.ServerResponse{ @@ -139206,17 +145982,17 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -139298,11 +146074,11 @@ type RegionInstanceGroupManagersPatchPerInstanceConfigsCall struct { // serves as a key used to distinguish whether to perform insert or // patch. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request, should conform to +// RFC1035. func (r *RegionInstanceGroupManagersService) PatchPerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerpatchinstanceconfigreq *RegionInstanceGroupManagerPatchInstanceConfigReq) *RegionInstanceGroupManagersPatchPerInstanceConfigsCall { c := &RegionInstanceGroupManagersPatchPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -139397,17 +146173,17 @@ func (c *RegionInstanceGroupManagersPatchPerInstanceConfigsCall) Do(opts ...goog if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -139593,17 +146369,17 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. 
if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -139690,11 +146466,11 @@ type RegionInstanceGroupManagersResizeCall struct { // draining, it can take up to 60 seconds after the connection draining // duration has elapsed before the VM instance is removed or deleted. // -// - instanceGroupManager: Name of the managed instance group. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - size: Number of instances that should exist in this instance group -// manager. +// - instanceGroupManager: Name of the managed instance group. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - size: Number of instances that should exist in this instance group +// manager. func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -139784,17 +146560,17 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -139978,17 +146754,17 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -140166,17 +146942,17 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -140258,11 +147034,11 @@ type RegionInstanceGroupManagersUpdatePerInstanceConfigsCall struct { // serves as a key used to distinguish whether to perform insert or // patch. // -// - instanceGroupManager: The name of the managed instance group. It -// should conform to RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request, should conform to -// RFC1035. +// - instanceGroupManager: The name of the managed instance group. It +// should conform to RFC1035. +// - project: Project ID for this request. 
+// - region: Name of the region scoping this request, should conform to +// RFC1035. func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { c := &RegionInstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -140357,17 +147133,17 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -140534,17 +147310,17 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ @@ -140783,17 +147559,17 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionInstanceGroupList{ ServerResponse: googleapi.ServerResponse{ @@ -140911,10 +147687,10 @@ type RegionInstanceGroupsListInstancesCall struct { // instances that are running. The orderBy query parameter is not // supported. // -// - instanceGroup: Name of the regional instance group for which we -// want to list the instances. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - instanceGroup: Name of the regional instance group for which we +// want to list the instances. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -140959,7 +147735,1019 @@ func (r *RegionInstanceGroupsService) ListInstances(project string, region strin // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne // .*instance`. 
-func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { +func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroups.listInstances" call. +// Exactly one of *RegionInstanceGroupsListInstances or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupsListInstances.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &RegionInstanceGroupsListInstances{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroups.listInstances", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroup" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "instanceGroup": { + // "description": "Name of the regional instance group for which we want to list the instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "request": { + // "$ref": "RegionInstanceGroupsListInstancesRequest" + // }, + // "response": { + // "$ref": "RegionInstanceGroupsListInstances" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionInstanceGroups.setNamedPorts": + +type RegionInstanceGroupsSetNamedPortsCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetNamedPorts: Sets the named ports for the specified regional +// instance group. +// +// - instanceGroup: The name of the regional instance group where the +// named ports are updated. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { + c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroup = instanceGroup + c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. 
If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the named ports for the specified regional instance group.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroups.setNamedPorts", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the regional instance group where the named ports are updated.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + // "request": { + // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceTemplates.delete": + +type RegionInstanceTemplatesDeleteCall struct { + s *Service + project string + region string + instanceTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified instance template. Deleting an instance +// template is permanent and cannot be undone. +// +// - instanceTemplate: The name of the instance template to delete. +// - project: Project ID for this request. +// - region: The name of the region for this request. 
+func (r *RegionInstanceTemplatesService) Delete(project string, region string, instanceTemplate string) *RegionInstanceTemplatesDeleteCall { + c := &RegionInstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceTemplate = instanceTemplate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceTemplatesDeleteCall) RequestId(requestId string) *RegionInstanceTemplatesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceTemplatesDeleteCall) Context(ctx context.Context) *RegionInstanceTemplatesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceTemplatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceTemplate": c.instanceTemplate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceTemplates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *RegionInstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone.", + // "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", + // "httpMethod": "DELETE", + // "id": "compute.regionInstanceTemplates.delete", + // "parameterOrder": [ + // "project", + // "region", + // "instanceTemplate" + // ], + // "parameters": { + // "instanceTemplate": { + // "description": "The name of the instance template to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceTemplates.get": + +type RegionInstanceTemplatesGetCall struct { + s *Service + project string + region string + instanceTemplate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified instance template. +// +// - instanceTemplate: The name of the instance template. +// - project: Project ID for this request. +// - region: The name of the region for this request. 
+func (r *RegionInstanceTemplatesService) Get(project string, region string, instanceTemplate string) *RegionInstanceTemplatesGetCall { + c := &RegionInstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceTemplate = instanceTemplate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceTemplatesGetCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceTemplatesGetCall) IfNoneMatch(entityTag string) *RegionInstanceTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceTemplatesGetCall) Context(ctx context.Context) *RegionInstanceTemplatesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceTemplatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceTemplate": c.instanceTemplate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceTemplates.get" call. +// Exactly one of *InstanceTemplate or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceTemplate.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &InstanceTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified instance template.", + // "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", + // "httpMethod": "GET", + // "id": "compute.regionInstanceTemplates.get", + // "parameterOrder": [ + // "project", + // "region", + // "instanceTemplate" + // ], + // "parameters": { + // "instanceTemplate": { + // "description": "The name of the instance template.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", + // "response": { + // "$ref": "InstanceTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionInstanceTemplates.insert": + +type RegionInstanceTemplatesInsertCall struct { + s *Service + project string + region string + instancetemplate *InstanceTemplate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an instance template in the specified project and +// region using the global instance template whose URL is included in +// the request. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +func (r *RegionInstanceTemplatesService) Insert(project string, region string, instancetemplate *InstanceTemplate) *RegionInstanceTemplatesInsertCall { + c := &RegionInstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instancetemplate = instancetemplate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. 
This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceTemplatesInsertCall) RequestId(requestId string) *RegionInstanceTemplatesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceTemplatesInsertCall) Context(ctx context.Context) *RegionInstanceTemplatesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceTemplatesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceTemplates.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an instance template in the specified project and region using the global instance template whose URL is included in the request.", + // "flatPath": "projects/{project}/regions/{region}/instanceTemplates", + // "httpMethod": "POST", + // "id": "compute.regionInstanceTemplates.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/instanceTemplates", + // "request": { + // "$ref": "InstanceTemplate" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceTemplates.list": + +type RegionInstanceTemplatesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of instance templates that are contained +// within the specified project and region. +// +// - project: Project ID for this request. +// - region: The name of the regions for this request. +func (r *RegionInstanceTemplatesService) List(project string, region string) *RegionInstanceTemplatesListCall { + c := &RegionInstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionInstanceTemplatesListCall) Filter(filter string) *RegionInstanceTemplatesListCall { c.urlParams_.Set("filter", filter) return c } @@ -140970,7 +148758,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionIns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { +func (c *RegionInstanceTemplatesListCall) MaxResults(maxResults int64) *RegionInstanceTemplatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -140984,7 +148772,7 @@ func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *Re // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. 
-func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { +func (c *RegionInstanceTemplatesListCall) OrderBy(orderBy string) *RegionInstanceTemplatesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -140992,7 +148780,7 @@ func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionI // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { +func (c *RegionInstanceTemplatesListCall) PageToken(pageToken string) *RegionInstanceTemplatesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -141001,7 +148789,7 @@ func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *Reg // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListInstancesCall { +func (c *RegionInstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceTemplatesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -141009,86 +148797,92 @@ func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnParti // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { +func (c *RegionInstanceTemplatesListCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceTemplatesListCall) IfNoneMatch(entityTag string) *RegionInstanceTemplatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { +func (c *RegionInstanceTemplatesListCall) Context(ctx context.Context) *RegionInstanceTemplatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { +func (c *RegionInstanceTemplatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.listInstances" call. -// Exactly one of *RegionInstanceGroupsListInstances or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *RegionInstanceGroupsListInstances.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { +// Do executes the "compute.regionInstanceTemplates.list" call. +// Exactly one of *InstanceTemplateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceTemplateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &RegionInstanceGroupsListInstances{ + ret := &InstanceTemplateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -141100,14 +148894,13 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.listInstances", + // "description": "Retrieves a list of instance templates that are contained within the specified project and region.", + // "flatPath": "projects/{project}/regions/{region}/instanceTemplates", + // "httpMethod": "GET", + // "id": "compute.regionInstanceTemplates.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroup" + // "region" // ], // "parameters": { // "filter": { @@ -141115,12 +148908,6 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "location": "query", // "type": "string" // }, - // "instanceGroup": { - // "description": "Name of the regional instance group for which we want to list the instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -141147,8 +148934,9 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the regions for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -141158,12 +148946,9 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", - // "request": { - // "$ref": "RegionInstanceGroupsListInstancesRequest" - // }, + // "path": "projects/{project}/regions/{region}/instanceTemplates", // "response": { - // "$ref": "RegionInstanceGroupsListInstances" + // "$ref": "InstanceTemplateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -141177,7 +148962,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. 
// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { +func (c *RegionInstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -141195,194 +148980,6 @@ func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f fun } } -// method id "compute.regionInstanceGroups.setNamedPorts": - -type RegionInstanceGroupsSetNamedPortsCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetNamedPorts: Sets the named ports for the specified regional -// instance group. -// -// - instanceGroup: The name of the regional instance group where the -// named ports are updated. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { - c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the named ports for the specified regional instance group.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.setNamedPorts", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroup" - // ], - // "parameters": { - // "instanceGroup": { - // "description": "The name of the regional instance group where the named ports are updated.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - // "request": { - // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - // method id "compute.regionInstances.bulkInsert": type RegionInstancesBulkInsertCall struct { @@ -141492,17 +149089,17 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -141576,11 +149173,11 @@ type RegionNetworkEndpointGroupsDeleteCall struct { // NEG cannot be deleted if it is configured as a backend of a backend // service. // -// - networkEndpointGroup: The name of the network endpoint group to -// delete. It should comply with RFC1035. -// - project: Project ID for this request. -// - region: The name of the region where the network endpoint group is -// located. It should comply with RFC1035. +// - networkEndpointGroup: The name of the network endpoint group to +// delete. It should comply with RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the network endpoint group is +// located. It should comply with RFC1035. func (r *RegionNetworkEndpointGroupsService) Delete(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsDeleteCall { c := &RegionNetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -141669,17 +149266,17 @@ func (c *RegionNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -141753,14 +149350,13 @@ type RegionNetworkEndpointGroupsGetCall struct { header_ http.Header } -// Get: Returns the specified network endpoint group. Gets a list of -// available network endpoint groups by making a list() request. +// Get: Returns the specified network endpoint group. // -// - networkEndpointGroup: The name of the network endpoint group. It -// should comply with RFC1035. -// - project: Project ID for this request. -// - region: The name of the region where the network endpoint group is -// located. It should comply with RFC1035. +// - networkEndpointGroup: The name of the network endpoint group. It +// should comply with RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the network endpoint group is +// located. It should comply with RFC1035. 
func (r *RegionNetworkEndpointGroupsService) Get(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsGetCall { c := &RegionNetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -141846,17 +149442,17 @@ func (c *RegionNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ @@ -141870,7 +149466,7 @@ func (c *RegionNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + // "description": "Returns the specified network endpoint group.", // "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", // "httpMethod": "GET", // "id": "compute.regionNetworkEndpointGroups.get", @@ -141928,9 +149524,9 @@ type RegionNetworkEndpointGroupsInsertCall struct { // Insert: Creates a network endpoint group in the specified project // using the parameters that are included in the request. // -// - project: Project ID for this request. -// - region: The name of the region where you want to create the network -// endpoint group. It should comply with RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where you want to create the network +// endpoint group. It should comply with RFC1035. func (r *RegionNetworkEndpointGroupsService) Insert(project string, region string, networkendpointgroup *NetworkEndpointGroup) *RegionNetworkEndpointGroupsInsertCall { c := &RegionNetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -142023,17 +149619,17 @@ func (c *RegionNetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -142105,9 +149701,9 @@ type RegionNetworkEndpointGroupsListCall struct { // List: Retrieves the list of regional network endpoint groups // available to the specified project in the given region. // -// - project: Project ID for this request. -// - region: The name of the region where the network endpoint group is -// located. It should comply with RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the network endpoint group is +// located. It should comply with RFC1035. 
func (r *RegionNetworkEndpointGroupsService) List(project string, region string) *RegionNetworkEndpointGroupsListCall { c := &RegionNetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -142273,17 +149869,17 @@ func (c *RegionNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ @@ -142504,17 +150100,17 @@ func (c *RegionNetworkFirewallPoliciesAddAssociationCall) Do(opts ...googleapi.C if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -142715,17 +150311,17 @@ func (c *RegionNetworkFirewallPoliciesAddRuleCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -142915,17 +150511,17 @@ func (c *RegionNetworkFirewallPoliciesCloneRulesCall) Do(opts ...googleapi.CallO if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -143098,17 +150694,17 @@ func (c *RegionNetworkFirewallPoliciesDeleteCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -143274,17 +150870,17 @@ func (c *RegionNetworkFirewallPoliciesGetCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -143358,10 +150954,10 @@ type RegionNetworkFirewallPoliciesGetAssociationCall struct { // GetAssociation: Gets an association with the specified 
name. // -// - firewallPolicy: Name of the firewall policy to which the queried -// association belongs. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - firewallPolicy: Name of the firewall policy to which the queried +// association belongs. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionNetworkFirewallPoliciesService) GetAssociation(project string, region string, firewallPolicy string) *RegionNetworkFirewallPoliciesGetAssociationCall { c := &RegionNetworkFirewallPoliciesGetAssociationCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -143454,17 +151050,17 @@ func (c *RegionNetworkFirewallPoliciesGetAssociationCall) Do(opts ...googleapi.C if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyAssociation{ ServerResponse: googleapi.ServerResponse{ @@ -143633,17 +151229,17 @@ func (c *RegionNetworkFirewallPoliciesGetEffectiveFirewallsCall) Do(opts ...goog if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -143812,17 +151408,17 @@ func (c *RegionNetworkFirewallPoliciesGetIamPolicyCall) Do(opts ...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -143902,10 +151498,10 @@ type RegionNetworkFirewallPoliciesGetRuleCall struct { // GetRule: Gets a rule of the specified priority. // -// - firewallPolicy: Name of the firewall policy to which the queried -// rule belongs. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - firewallPolicy: Name of the firewall policy to which the queried +// rule belongs. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionNetworkFirewallPoliciesService) GetRule(project string, region string, firewallPolicy string) *RegionNetworkFirewallPoliciesGetRuleCall { c := &RegionNetworkFirewallPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -143998,17 +151594,17 @@ func (c *RegionNetworkFirewallPoliciesGetRuleCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyRule{ ServerResponse: googleapi.ServerResponse{ @@ -144182,17 +151778,17 @@ func (c *RegionNetworkFirewallPoliciesInsertCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -144432,17 +152028,17 @@ func (c *RegionNetworkFirewallPoliciesListCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &FirewallPolicyList{ ServerResponse: googleapi.ServerResponse{ @@ -144654,17 +152250,17 @@ func (c *RegionNetworkFirewallPoliciesPatchCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -144849,17 +152445,17 @@ func (c *RegionNetworkFirewallPoliciesPatchRuleCall) Do(opts ...googleapi.CallOp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -145044,17 +152640,17 @@ func (c *RegionNetworkFirewallPoliciesRemoveAssociationCall) Do(opts ...googleap if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -145234,17 +152830,17 @@ func (c *RegionNetworkFirewallPoliciesRemoveRuleCall) Do(opts ...googleapi.CallO if 
res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -145410,17 +153006,17 @@ func (c *RegionNetworkFirewallPoliciesSetIamPolicyCall) Do(opts ...googleapi.Cal if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -145578,17 +153174,17 @@ func (c *RegionNetworkFirewallPoliciesTestIamPermissionsCall) Do(opts ...googlea if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -145665,10 +153261,10 @@ type RegionNotificationEndpointsDeleteCall struct { // Delete: Deletes the specified NotificationEndpoint in the given // region // -// - notificationEndpoint: Name of the NotificationEndpoint resource to -// delete. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - notificationEndpoint: Name of the NotificationEndpoint resource to +// delete. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. func (r *RegionNotificationEndpointsService) Delete(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsDeleteCall { c := &RegionNotificationEndpointsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -145757,17 +153353,17 @@ func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -145846,10 +153442,10 @@ type RegionNotificationEndpointsGetCall struct { // Get: Returns the specified NotificationEndpoint resource in the given // region. // -// - notificationEndpoint: Name of the NotificationEndpoint resource to -// return. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. +// - notificationEndpoint: Name of the NotificationEndpoint resource to +// return. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
func (r *RegionNotificationEndpointsService) Get(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsGetCall { c := &RegionNotificationEndpointsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -145935,17 +153531,17 @@ func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NotificationEndpoint{ ServerResponse: googleapi.ServerResponse{ @@ -146113,17 +153709,17 @@ func (c *RegionNotificationEndpointsInsertCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -146363,17 +153959,17 @@ func (c *RegionNotificationEndpointsListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NotificationEndpointList{ ServerResponse: googleapi.ServerResponse{ @@ -146557,7 +154153,7 @@ func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -146705,17 +154301,17 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -146956,17 +154552,17 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ @@ -147020,7 +154616,1008 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // 
"type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/operations", + // "response": { + // "$ref": "OperationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionOperations.wait": + +type RegionOperationsWaitCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Wait: Waits for the specified Operation resource to return as `DONE` +// or for the request to approach the 2 minute deadline, and retrieves +// the specified Operation resource. This method differs from the `GET` +// method in that it waits for no more than the default deadline (2 +// minutes) and then returns the current state of the operation, which +// might be `DONE` or still in progress. This method is called on a +// best-effort basis. Specifically: - In uncommon cases, when the server +// is overloaded, the request might return before the default deadline +// is reached, or might return after zero seconds. - If the default +// deadline is reached, there is no guarantee that the operation is +// actually done when the method returns. Be prepared to retry if the +// operation is not `DONE`. +// +// - operation: Name of the Operations resource to return. +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *RegionOperationsService) Wait(project string, region string, operation string) *RegionOperationsWaitCall { + c := &RegionOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionOperationsWaitCall) Fields(s ...googleapi.Field) *RegionOperationsWaitCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionOperationsWaitCall) Context(ctx context.Context) *RegionOperationsWaitCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionOperationsWaitCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/operations/{operation}/wait") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "operation": c.operation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionOperations.wait" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress. This method is called on a best-effort basis. Specifically: - In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. - If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`. 
", + // "flatPath": "projects/{project}/regions/{region}/operations/{operation}/wait", + // "httpMethod": "POST", + // "id": "compute.regionOperations.wait", + // "parameterOrder": [ + // "project", + // "region", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/operations/{operation}/wait", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionSecurityPolicies.delete": + +type RegionSecurityPoliciesDeleteCall struct { + s *Service + project string + region string + securityPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified policy. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - securityPolicy: Name of the security policy to delete. +func (r *RegionSecurityPoliciesService) Delete(project string, region string, securityPolicy string) *RegionSecurityPoliciesDeleteCall { + c := &RegionSecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.securityPolicy = securityPolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionSecurityPoliciesDeleteCall) RequestId(requestId string) *RegionSecurityPoliciesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *RegionSecurityPoliciesDeleteCall) Context(ctx context.Context) *RegionSecurityPoliciesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSecurityPoliciesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSecurityPolicies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified policy.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "httpMethod": "DELETE", + // "id": "compute.regionSecurityPolicies.delete", + // "parameterOrder": [ + // "project", + // "region", + // "securityPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionSecurityPolicies.get": + +type RegionSecurityPoliciesGetCall struct { + s *Service + project string + region string + securityPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: List all of the ordered rules present in a single specified +// policy. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - securityPolicy: Name of the security policy to get. +func (r *RegionSecurityPoliciesService) Get(project string, region string, securityPolicy string) *RegionSecurityPoliciesGetCall { + c := &RegionSecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.securityPolicy = securityPolicy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSecurityPoliciesGetCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionSecurityPoliciesGetCall) IfNoneMatch(entityTag string) *RegionSecurityPoliciesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSecurityPoliciesGetCall) Context(ctx context.Context) *RegionSecurityPoliciesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionSecurityPoliciesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSecurityPolicies.get" call. +// Exactly one of *SecurityPolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SecurityPolicy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPolicy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SecurityPolicy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List all of the ordered rules present in a single specified policy.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "httpMethod": "GET", + // "id": "compute.regionSecurityPolicies.get", + // "parameterOrder": [ + // "project", + // "region", + // "securityPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to get.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": 
"projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "response": { + // "$ref": "SecurityPolicy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionSecurityPolicies.insert": + +type RegionSecurityPoliciesInsertCall struct { + s *Service + project string + region string + securitypolicy *SecurityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new policy in the specified project using the data +// included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionSecurityPoliciesService) Insert(project string, region string, securitypolicy *SecurityPolicy) *RegionSecurityPoliciesInsertCall { + c := &RegionSecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.securitypolicy = securitypolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionSecurityPoliciesInsertCall) RequestId(requestId string) *RegionSecurityPoliciesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *RegionSecurityPoliciesInsertCall) ValidateOnly(validateOnly bool) *RegionSecurityPoliciesInsertCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSecurityPoliciesInsertCall) Context(ctx context.Context) *RegionSecurityPoliciesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionSecurityPoliciesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSecurityPolicies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new policy in the specified project using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies", + // "httpMethod": "POST", + // "id": "compute.regionSecurityPolicies.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/securityPolicies", + // "request": { + // "$ref": "SecurityPolicy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionSecurityPolicies.list": + +type RegionSecurityPoliciesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: List all the policies that have been configured for the +// specified project and region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionSecurityPoliciesService) List(project string, region string) *RegionSecurityPoliciesListCall { + c := &RegionSecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. 
For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionSecurityPoliciesListCall) Filter(filter string) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionSecurityPoliciesListCall) MaxResults(maxResults int64) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionSecurityPoliciesListCall) OrderBy(orderBy string) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionSecurityPoliciesListCall) PageToken(pageToken string) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionSecurityPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSecurityPoliciesListCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionSecurityPoliciesListCall) IfNoneMatch(entityTag string) *RegionSecurityPoliciesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSecurityPoliciesListCall) Context(ctx context.Context) *RegionSecurityPoliciesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSecurityPoliciesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSecurityPolicies.list" call. +// Exactly one of *SecurityPolicyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SecurityPolicyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SecurityPolicyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List all the policies that have been configured for the specified project and region.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies", + // "httpMethod": "GET", + // "id": "compute.regionSecurityPolicies.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -147032,9 +155629,9 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/operations", + // "path": "projects/{project}/regions/{region}/securityPolicies", // "response": { - // "$ref": "OperationList" + // "$ref": "SecurityPolicyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -147048,7 +155645,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { +func (c *RegionSecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPolicyList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -147066,46 +155663,57 @@ func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationL } } -// method id "compute.regionOperations.wait": +// method id "compute.regionSecurityPolicies.patch": -type RegionOperationsWaitCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSecurityPoliciesPatchCall struct { + s *Service + project string + region string + securityPolicy string + securitypolicy *SecurityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Wait: Waits for the specified Operation resource to return as `DONE` -// or for the request to approach the 2 minute deadline, and retrieves -// the specified Operation resource. 
This method differs from the `GET` -// method in that it waits for no more than the default deadline (2 -// minutes) and then returns the current state of the operation, which -// might be `DONE` or still in progress. This method is called on a -// best-effort basis. Specifically: - In uncommon cases, when the server -// is overloaded, the request might return before the default deadline -// is reached, or might return after zero seconds. - If the default -// deadline is reached, there is no guarantee that the operation is -// actually done when the method returns. Be prepared to retry if the -// operation is not `DONE`. +// Patch: Patches the specified policy with the data included in the +// request. To clear fields in the rule, leave the fields empty and +// specify them in the updateMask. This cannot be used to be update the +// rules in the policy. Please use the per rule methods like addRule, +// patchRule, and removeRule instead. // -// - operation: Name of the Operations resource to return. // - project: Project ID for this request. -// - region: Name of the region for this request. -func (r *RegionOperationsService) Wait(project string, region string, operation string) *RegionOperationsWaitCall { - c := &RegionOperationsWaitCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - region: Name of the region scoping this request. +// - securityPolicy: Name of the security policy to update. +func (r *RegionSecurityPoliciesService) Patch(project string, region string, securityPolicy string, securitypolicy *SecurityPolicy) *RegionSecurityPoliciesPatchCall { + c := &RegionSecurityPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.operation = operation + c.securityPolicy = securityPolicy + c.securitypolicy = securitypolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionSecurityPoliciesPatchCall) RequestId(requestId string) *RegionSecurityPoliciesPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsWaitCall) Fields(s ...googleapi.Field) *RegionOperationsWaitCall { +func (c *RegionSecurityPoliciesPatchCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -147113,21 +155721,21 @@ func (c *RegionOperationsWaitCall) Fields(s ...googleapi.Field) *RegionOperation // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionOperationsWaitCall) Context(ctx context.Context) *RegionOperationsWaitCall { +func (c *RegionSecurityPoliciesPatchCall) Context(ctx context.Context) *RegionSecurityPoliciesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionOperationsWaitCall) Header() http.Header { +func (c *RegionSecurityPoliciesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -147135,48 +155743,53 @@ func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/operations/{operation}/wait") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, + "project": c.project, + "region": c.region, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.wait" call. +// Do executes the "compute.regionSecurityPolicies.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -147190,23 +155803,16 @@ func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Waits for the specified Operation resource to return as `DONE` or for the request to approach the 2 minute deadline, and retrieves the specified Operation resource. 
This method differs from the `GET` method in that it waits for no more than the default deadline (2 minutes) and then returns the current state of the operation, which might be `DONE` or still in progress. This method is called on a best-effort basis. Specifically: - In uncommon cases, when the server is overloaded, the request might return before the default deadline is reached, or might return after zero seconds. - If the default deadline is reached, there is no guarantee that the operation is actually done when the method returns. Be prepared to retry if the operation is not `DONE`. ", - // "flatPath": "projects/{project}/regions/{region}/operations/{operation}/wait", - // "httpMethod": "POST", - // "id": "compute.regionOperations.wait", + // "description": "Patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "httpMethod": "PATCH", + // "id": "compute.regionSecurityPolicies.patch", // "parameterOrder": [ // "project", // "region", - // "operation" + // "securityPolicy" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -147215,48 +155821,62 @@ func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/operations/{operation}/wait", + // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "request": { + // "$ref": "SecurityPolicy" + // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionSecurityPolicies.delete": +// method id "compute.regionSslCertificates.delete": -type RegionSecurityPoliciesDeleteCall struct { +type RegionSslCertificatesDeleteCall struct { s *Service project string region string - securityPolicy string + sslCertificate string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified policy. +// Delete: Deletes the specified SslCertificate resource in the region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -// - securityPolicy: Name of the security policy to delete. -func (r *RegionSecurityPoliciesService) Delete(project string, region string, securityPolicy string) *RegionSecurityPoliciesDeleteCall { - c := &RegionSecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - sslCertificate: Name of the SslCertificate resource to delete. +func (r *RegionSslCertificatesService) Delete(project string, region string, sslCertificate string) *RegionSslCertificatesDeleteCall { + c := &RegionSslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.securityPolicy = securityPolicy + c.sslCertificate = sslCertificate return c } @@ -147271,7 +155891,7 @@ func (r *RegionSecurityPoliciesService) Delete(project string, region string, se // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionSecurityPoliciesDeleteCall) RequestId(requestId string) *RegionSecurityPoliciesDeleteCall { +func (c *RegionSslCertificatesDeleteCall) RequestId(requestId string) *RegionSslCertificatesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -147279,7 +155899,7 @@ func (c *RegionSecurityPoliciesDeleteCall) RequestId(requestId string) *RegionSe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesDeleteCall { +func (c *RegionSslCertificatesDeleteCall) Fields(s ...googleapi.Field) *RegionSslCertificatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -147287,21 +155907,21 @@ func (c *RegionSecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *RegionS // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSecurityPoliciesDeleteCall) Context(ctx context.Context) *RegionSecurityPoliciesDeleteCall { +func (c *RegionSslCertificatesDeleteCall) Context(ctx context.Context) *RegionSslCertificatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSecurityPoliciesDeleteCall) Header() http.Header { +func (c *RegionSslCertificatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -147311,7 +155931,7 @@ func (c *RegionSecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -147321,36 +155941,36 @@ func (c *RegionSecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "securityPolicy": c.securityPolicy, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSecurityPolicies.delete" call. +// Do executes the "compute.regionSslCertificates.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -147364,14 +155984,14 @@ func (c *RegionSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Deletes the specified policy.", - // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "description": "Deletes the specified SslCertificate resource in the region.", + // "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", // "httpMethod": "DELETE", - // "id": "compute.regionSecurityPolicies.delete", + // "id": "compute.regionSslCertificates.delete", // "parameterOrder": [ // "project", // "region", - // "securityPolicy" + // "sslCertificate" // ], // "parameters": { // "project": { @@ -147393,15 +156013,15 @@ func (c *RegionSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Op // "location": "query", // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to delete.", + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", // "response": { // "$ref": "Operation" // }, @@ -147413,37 +156033,38 @@ func (c *RegionSecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.regionSecurityPolicies.get": +// method id "compute.regionSslCertificates.get": -type RegionSecurityPoliciesGetCall struct { +type RegionSslCertificatesGetCall struct { s *Service project string region string - securityPolicy string + sslCertificate string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: List all of the ordered rules present in a single specified -// policy. +// Get: Returns the specified SslCertificate resource in the specified +// region. Get a list of available SSL certificates by making a list() +// request. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -// - securityPolicy: Name of the security policy to get. -func (r *RegionSecurityPoliciesService) Get(project string, region string, securityPolicy string) *RegionSecurityPoliciesGetCall { - c := &RegionSecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - sslCertificate: Name of the SslCertificate resource to return. +func (r *RegionSslCertificatesService) Get(project string, region string, sslCertificate string) *RegionSslCertificatesGetCall { + c := &RegionSslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.securityPolicy = securityPolicy + c.sslCertificate = sslCertificate return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSecurityPoliciesGetCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesGetCall { +func (c *RegionSslCertificatesGetCall) Fields(s ...googleapi.Field) *RegionSslCertificatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -147453,7 +156074,7 @@ func (c *RegionSecurityPoliciesGetCall) Fields(s ...googleapi.Field) *RegionSecu // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionSecurityPoliciesGetCall) IfNoneMatch(entityTag string) *RegionSecurityPoliciesGetCall { +func (c *RegionSslCertificatesGetCall) IfNoneMatch(entityTag string) *RegionSslCertificatesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -147461,21 +156082,21 @@ func (c *RegionSecurityPoliciesGetCall) IfNoneMatch(entityTag string) *RegionSec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSecurityPoliciesGetCall) Context(ctx context.Context) *RegionSecurityPoliciesGetCall { +func (c *RegionSslCertificatesGetCall) Context(ctx context.Context) *RegionSslCertificatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSecurityPoliciesGetCall) Header() http.Header { +func (c *RegionSslCertificatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -147488,7 +156109,7 @@ func (c *RegionSecurityPoliciesGetCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -147498,38 +156119,38 @@ func (c *RegionSecurityPoliciesGetCall) doRequest(alt string) (*http.Response, e googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, - "securityPolicy": c.securityPolicy, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSecurityPolicies.get" call. -// Exactly one of *SecurityPolicy or error will be non-nil. Any non-2xx +// Do executes the "compute.regionSslCertificates.get" call. +// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *SecurityPolicy.ServerResponse.Header or (if a response was returned +// *SslCertificate.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *RegionSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPolicy, error) { +func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &SecurityPolicy{ + ret := &SslCertificate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -147541,14 +156162,14 @@ func (c *RegionSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*Secur } return ret, nil // { - // "description": "List all of the ordered rules present in a single specified policy.", - // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", + // "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", // "httpMethod": "GET", - // "id": "compute.regionSecurityPolicies.get", + // "id": "compute.regionSslCertificates.get", // "parameterOrder": [ // "project", // "region", - // "securityPolicy" + // "sslCertificate" // ], // "parameters": { // "project": { @@ -147565,17 +156186,17 @@ func (c *RegionSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*Secur // "required": true, // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to get.", + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", + // "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", // "response": { - // "$ref": "SecurityPolicy" + // "$ref": "SslCertificate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -147586,28 +156207,28 @@ func (c *RegionSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*Secur } -// method id "compute.regionSecurityPolicies.insert": +// method id "compute.regionSslCertificates.insert": -type RegionSecurityPoliciesInsertCall struct { +type RegionSslCertificatesInsertCall struct { s *Service project string region string - securitypolicy *SecurityPolicy + sslcertificate *SslCertificate urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a new policy in the specified project using the data -// included in the request. +// Insert: Creates a SslCertificate resource in the specified project +// and region using the data included in the request // // - project: Project ID for this request. // - region: Name of the region scoping this request. 
-func (r *RegionSecurityPoliciesService) Insert(project string, region string, securitypolicy *SecurityPolicy) *RegionSecurityPoliciesInsertCall { - c := &RegionSecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionSslCertificatesService) Insert(project string, region string, sslcertificate *SslCertificate) *RegionSslCertificatesInsertCall { + c := &RegionSslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.securitypolicy = securitypolicy + c.sslcertificate = sslcertificate return c } @@ -147622,22 +156243,15 @@ func (r *RegionSecurityPoliciesService) Insert(project string, region string, se // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionSecurityPoliciesInsertCall) RequestId(requestId string) *RegionSecurityPoliciesInsertCall { +func (c *RegionSslCertificatesInsertCall) RequestId(requestId string) *RegionSslCertificatesInsertCall { c.urlParams_.Set("requestId", requestId) return c } -// ValidateOnly sets the optional parameter "validateOnly": If true, the -// request will not be committed. -func (c *RegionSecurityPoliciesInsertCall) ValidateOnly(validateOnly bool) *RegionSecurityPoliciesInsertCall { - c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesInsertCall { +func (c *RegionSslCertificatesInsertCall) Fields(s ...googleapi.Field) *RegionSslCertificatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -147645,21 +156259,21 @@ func (c *RegionSecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *RegionS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSecurityPoliciesInsertCall) Context(ctx context.Context) *RegionSecurityPoliciesInsertCall { +func (c *RegionSslCertificatesInsertCall) Context(ctx context.Context) *RegionSslCertificatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionSecurityPoliciesInsertCall) Header() http.Header { +func (c *RegionSslCertificatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -147667,14 +156281,14 @@ func (c *RegionSecurityPoliciesInsertCall) doRequest(alt string) (*http.Response } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -147688,31 +156302,31 @@ func (c *RegionSecurityPoliciesInsertCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSecurityPolicies.insert" call. +// Do executes the "compute.regionSslCertificates.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -147726,10 +156340,10 @@ func (c *RegionSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Creates a new policy in the specified project using the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/securityPolicies", + // "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", + // "flatPath": "projects/{project}/regions/{region}/sslCertificates", // "httpMethod": "POST", - // "id": "compute.regionSecurityPolicies.insert", + // "id": "compute.regionSslCertificates.insert", // "parameterOrder": [ // "project", // "region" @@ -147753,16 +156367,11 @@ func (c *RegionSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Op // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "validateOnly": { - // "description": "If true, the request will not be committed.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/securityPolicies", + // "path": "projects/{project}/regions/{region}/sslCertificates", // "request": { - // "$ref": "SecurityPolicy" + // "$ref": "SslCertificate" // }, // "response": { // "$ref": "Operation" @@ -147775,9 +156384,9 @@ func (c *RegionSecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.regionSecurityPolicies.list": +// method id "compute.regionSslCertificates.list": -type RegionSecurityPoliciesListCall struct { +type RegionSslCertificatesListCall struct { s *Service project string region string @@ -147787,13 +156396,13 @@ type RegionSecurityPoliciesListCall struct { header_ http.Header } -// List: List all the policies that have been configured for the -// specified project and region. +// List: Retrieves the list of SslCertificate resources available to the +// specified project in the specified region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. 
-func (r *RegionSecurityPoliciesService) List(project string, region string) *RegionSecurityPoliciesListCall { - c := &RegionSecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionSslCertificatesService) List(project string, region string) *RegionSslCertificatesListCall { + c := &RegionSslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -147834,7 +156443,7 @@ func (r *RegionSecurityPoliciesService) List(project string, region string) *Reg // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne // .*instance`. -func (c *RegionSecurityPoliciesListCall) Filter(filter string) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertificatesListCall { c.urlParams_.Set("filter", filter) return c } @@ -147845,7 +156454,7 @@ func (c *RegionSecurityPoliciesListCall) Filter(filter string) *RegionSecurityPo // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionSecurityPoliciesListCall) MaxResults(maxResults int64) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslCertificatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -147859,7 +156468,7 @@ func (c *RegionSecurityPoliciesListCall) MaxResults(maxResults int64) *RegionSec // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *RegionSecurityPoliciesListCall) OrderBy(orderBy string) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertificatesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -147867,7 +156476,7 @@ func (c *RegionSecurityPoliciesListCall) OrderBy(orderBy string) *RegionSecurity // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionSecurityPoliciesListCall) PageToken(pageToken string) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCertificatesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -147876,7 +156485,7 @@ func (c *RegionSecurityPoliciesListCall) PageToken(pageToken string) *RegionSecu // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionSecurityPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslCertificatesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -147884,7 +156493,7 @@ func (c *RegionSecurityPoliciesListCall) ReturnPartialSuccess(returnPartialSucce // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionSecurityPoliciesListCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) Fields(s ...googleapi.Field) *RegionSslCertificatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -147894,7 +156503,7 @@ func (c *RegionSecurityPoliciesListCall) Fields(s ...googleapi.Field) *RegionSec // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionSecurityPoliciesListCall) IfNoneMatch(entityTag string) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) IfNoneMatch(entityTag string) *RegionSslCertificatesListCall { c.ifNoneMatch_ = entityTag return c } @@ -147902,21 +156511,21 @@ func (c *RegionSecurityPoliciesListCall) IfNoneMatch(entityTag string) *RegionSe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSecurityPoliciesListCall) Context(ctx context.Context) *RegionSecurityPoliciesListCall { +func (c *RegionSslCertificatesListCall) Context(ctx context.Context) *RegionSslCertificatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSecurityPoliciesListCall) Header() http.Header { +func (c *RegionSslCertificatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -147929,7 +156538,7 @@ func (c *RegionSecurityPoliciesListCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -147943,33 +156552,33 @@ func (c *RegionSecurityPoliciesListCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSecurityPolicies.list" call. -// Exactly one of *SecurityPolicyList or error will be non-nil. Any +// Do executes the "compute.regionSslCertificates.list" call. +// Exactly one of *SslCertificateList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *SecurityPolicyList.ServerResponse.Header or (if a response was +// *SslCertificateList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyList, error) { +func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &SecurityPolicyList{ + ret := &SslCertificateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -147981,10 +156590,10 @@ func (c *RegionSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*Secu } return ret, nil // { - // "description": "List all the policies that have been configured for the specified project and region.", - // "flatPath": "projects/{project}/regions/{region}/securityPolicies", + // "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", + // "flatPath": "projects/{project}/regions/{region}/sslCertificates", // "httpMethod": "GET", - // "id": "compute.regionSecurityPolicies.list", + // "id": "compute.regionSslCertificates.list", // "parameterOrder": [ // "project", // "region" @@ -148033,9 +156642,9 @@ func (c *RegionSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*Secu // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/securityPolicies", + // "path": "projects/{project}/regions/{region}/sslCertificates", // "response": { - // "$ref": "SecurityPolicyList" + // "$ref": "SslCertificateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -148049,7 +156658,7 @@ func (c *RegionSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*Secu // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionSecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPolicyList) error) error { +func (c *RegionSslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -148067,217 +156676,31 @@ func (c *RegionSecurityPoliciesListCall) Pages(ctx context.Context, f func(*Secu } } -// method id "compute.regionSecurityPolicies.patch": - -type RegionSecurityPoliciesPatchCall struct { - s *Service - project string - region string - securityPolicy string - securitypolicy *SecurityPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches the specified policy with the data included in the -// request. -// -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - securityPolicy: Name of the security policy to update. -func (r *RegionSecurityPoliciesService) Patch(project string, region string, securityPolicy string, securitypolicy *SecurityPolicy) *RegionSecurityPoliciesPatchCall { - c := &RegionSecurityPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.securityPolicy = securityPolicy - c.securitypolicy = securitypolicy - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionSecurityPoliciesPatchCall) RequestId(requestId string) *RegionSecurityPoliciesPatchCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionSecurityPoliciesPatchCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionSecurityPoliciesPatchCall) Context(ctx context.Context) *RegionSecurityPoliciesPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionSecurityPoliciesPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionSecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "securityPolicy": c.securityPolicy, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionSecurityPolicies.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionSecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches the specified policy with the data included in the request.", - // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", - // "httpMethod": "PATCH", - // "id": "compute.regionSecurityPolicies.patch", - // "parameterOrder": [ - // "project", - // "region", - // "securityPolicy" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "securityPolicy": { - // "description": "Name of the security policy to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", - // "request": { - // "$ref": "SecurityPolicy" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionSslCertificates.delete": +// method id "compute.regionSslPolicies.delete": -type RegionSslCertificatesDeleteCall struct { - s *Service - project string - region string - sslCertificate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSslPoliciesDeleteCall struct { + s *Service + project string + region string + sslPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified SslCertificate resource in the region. +// Delete: Deletes the specified SSL policy. 
The SSL policy resource can +// be deleted only if it is not in use by any TargetHttpsProxy or +// TargetSslProxy resources. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - sslCertificate: Name of the SslCertificate resource to delete. -func (r *RegionSslCertificatesService) Delete(project string, region string, sslCertificate string) *RegionSslCertificatesDeleteCall { - c := &RegionSslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - sslPolicy: Name of the SSL policy to delete. The name must be 1-63 +// characters long, and comply with RFC1035. +func (r *RegionSslPoliciesService) Delete(project string, region string, sslPolicy string) *RegionSslPoliciesDeleteCall { + c := &RegionSslPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.sslCertificate = sslCertificate + c.sslPolicy = sslPolicy return c } @@ -148292,7 +156715,7 @@ func (r *RegionSslCertificatesService) Delete(project string, region string, ssl // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionSslCertificatesDeleteCall) RequestId(requestId string) *RegionSslCertificatesDeleteCall { +func (c *RegionSslPoliciesDeleteCall) RequestId(requestId string) *RegionSslPoliciesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -148300,7 +156723,7 @@ func (c *RegionSslCertificatesDeleteCall) RequestId(requestId string) *RegionSsl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesDeleteCall) Fields(s ...googleapi.Field) *RegionSslCertificatesDeleteCall { +func (c *RegionSslPoliciesDeleteCall) Fields(s ...googleapi.Field) *RegionSslPoliciesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -148308,21 +156731,21 @@ func (c *RegionSslCertificatesDeleteCall) Fields(s ...googleapi.Field) *RegionSs // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSslCertificatesDeleteCall) Context(ctx context.Context) *RegionSslCertificatesDeleteCall { +func (c *RegionSslPoliciesDeleteCall) Context(ctx context.Context) *RegionSslPoliciesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionSslCertificatesDeleteCall) Header() http.Header { +func (c *RegionSslPoliciesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -148332,7 +156755,7 @@ func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { @@ -148340,38 +156763,38 @@ func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "sslCertificate": c.sslCertificate, + "project": c.project, + "region": c.region, + "sslPolicy": c.sslPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.delete" call. +// Do executes the "compute.regionSslPolicies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -148385,14 +156808,14 @@ func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified SslCertificate resource in the region.", - // "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "description": "Deletes the specified SSL policy. 
The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", // "httpMethod": "DELETE", - // "id": "compute.regionSslCertificates.delete", + // "id": "compute.regionSslPolicies.delete", // "parameterOrder": [ // "project", // "region", - // "sslCertificate" + // "sslPolicy" // ], // "parameters": { // "project": { @@ -148414,15 +156837,14 @@ func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "location": "query", // "type": "string" // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to delete.", + // "sslPolicy": { + // "description": "Name of the SSL policy to delete. The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", // "response": { // "$ref": "Operation" // }, @@ -148434,38 +156856,38 @@ func (c *RegionSslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionSslCertificates.get": +// method id "compute.regionSslPolicies.get": -type RegionSslCertificatesGetCall struct { - s *Service - project string - region string - sslCertificate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionSslPoliciesGetCall struct { + s *Service + project string + region string + sslPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified SslCertificate resource in the specified -// region. Get a list of available SSL certificates by making a list() -// request. +// Get: Lists all of the ordered rules present in a single specified +// policy. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - sslCertificate: Name of the SslCertificate resource to return. -func (r *RegionSslCertificatesService) Get(project string, region string, sslCertificate string) *RegionSslCertificatesGetCall { - c := &RegionSslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 +// characters long, and comply with RFC1035. +func (r *RegionSslPoliciesService) Get(project string, region string, sslPolicy string) *RegionSslPoliciesGetCall { + c := &RegionSslPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.sslCertificate = sslCertificate + c.sslPolicy = sslPolicy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionSslCertificatesGetCall) Fields(s ...googleapi.Field) *RegionSslCertificatesGetCall { +func (c *RegionSslPoliciesGetCall) Fields(s ...googleapi.Field) *RegionSslPoliciesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -148475,7 +156897,7 @@ func (c *RegionSslCertificatesGetCall) Fields(s ...googleapi.Field) *RegionSslCe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionSslCertificatesGetCall) IfNoneMatch(entityTag string) *RegionSslCertificatesGetCall { +func (c *RegionSslPoliciesGetCall) IfNoneMatch(entityTag string) *RegionSslPoliciesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -148483,21 +156905,21 @@ func (c *RegionSslCertificatesGetCall) IfNoneMatch(entityTag string) *RegionSslC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionSslCertificatesGetCall) Context(ctx context.Context) *RegionSslCertificatesGetCall { +func (c *RegionSslPoliciesGetCall) Context(ctx context.Context) *RegionSslPoliciesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSslCertificatesGetCall) Header() http.Header { +func (c *RegionSslPoliciesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -148510,7 +156932,7 @@ func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -148518,40 +156940,40 @@ func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "sslCertificate": c.sslCertificate, + "project": c.project, + "region": c.region, + "sslPolicy": c.sslPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.get" call. -// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx +// Do executes the "compute.regionSslPolicies.get" call. +// Exactly one of *SslPolicy or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *SslCertificate.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { +// *SslPolicy.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &SslCertificate{ + ret := &SslPolicy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -148563,14 +156985,14 @@ func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCer } return ret, nil // { - // "description": "Returns the specified SslCertificate resource in the specified region. Get a list of available SSL certificates by making a list() request.", - // "flatPath": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "description": "Lists all of the ordered rules present in a single specified policy.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", // "httpMethod": "GET", - // "id": "compute.regionSslCertificates.get", + // "id": "compute.regionSslPolicies.get", // "parameterOrder": [ // "project", // "region", - // "sslCertificate" + // "sslPolicy" // ], // "parameters": { // "project": { @@ -148587,17 +157009,16 @@ func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCer // "required": true, // "type": "string" // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to return.", + // "sslPolicy": { + // "description": "Name of the SSL policy to update. 
The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/sslCertificates/{sslCertificate}", + // "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", // "response": { - // "$ref": "SslCertificate" + // "$ref": "SslPolicy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -148608,28 +157029,28 @@ func (c *RegionSslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCer } -// method id "compute.regionSslCertificates.insert": +// method id "compute.regionSslPolicies.insert": -type RegionSslCertificatesInsertCall struct { - s *Service - project string - region string - sslcertificate *SslCertificate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionSslPoliciesInsertCall struct { + s *Service + project string + region string + sslpolicy *SslPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a SslCertificate resource in the specified project -// and region using the data included in the request +// Insert: Creates a new policy in the specified project and region +// using the data included in the request. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionSslCertificatesService) Insert(project string, region string, sslcertificate *SslCertificate) *RegionSslCertificatesInsertCall { - c := &RegionSslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionSslPoliciesService) Insert(project string, region string, sslpolicy *SslPolicy) *RegionSslPoliciesInsertCall { + c := &RegionSslPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.sslcertificate = sslcertificate + c.sslpolicy = sslpolicy return c } @@ -148644,7 +157065,7 @@ func (r *RegionSslCertificatesService) Insert(project string, region string, ssl // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionSslCertificatesInsertCall) RequestId(requestId string) *RegionSslCertificatesInsertCall { +func (c *RegionSslPoliciesInsertCall) RequestId(requestId string) *RegionSslPoliciesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -148652,7 +157073,7 @@ func (c *RegionSslCertificatesInsertCall) RequestId(requestId string) *RegionSsl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesInsertCall) Fields(s ...googleapi.Field) *RegionSslCertificatesInsertCall { +func (c *RegionSslPoliciesInsertCall) Fields(s ...googleapi.Field) *RegionSslPoliciesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -148660,21 +157081,21 @@ func (c *RegionSslCertificatesInsertCall) Fields(s ...googleapi.Field) *RegionSs // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionSslCertificatesInsertCall) Context(ctx context.Context) *RegionSslCertificatesInsertCall { +func (c *RegionSslPoliciesInsertCall) Context(ctx context.Context) *RegionSslPoliciesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSslCertificatesInsertCall) Header() http.Header { +func (c *RegionSslPoliciesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -148682,14 +157103,14 @@ func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -148703,31 +157124,31 @@ func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.insert" call. +// Do executes the "compute.regionSslPolicies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionSslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -148741,10 +157162,10 @@ func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a SslCertificate resource in the specified project and region using the data included in the request", - // "flatPath": "projects/{project}/regions/{region}/sslCertificates", + // "description": "Creates a new policy in the specified project and region using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies", // "httpMethod": "POST", - // "id": "compute.regionSslCertificates.insert", + // "id": "compute.regionSslPolicies.insert", // "parameterOrder": [ // "project", // "region" @@ -148770,9 +157191,9 @@ func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/sslCertificates", + // "path": "projects/{project}/regions/{region}/sslPolicies", // "request": { - // "$ref": "SslCertificate" + // "$ref": "SslPolicy" // }, // "response": { // "$ref": "Operation" @@ -148785,9 +157206,9 @@ func (c *RegionSslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionSslCertificates.list": +// method id "compute.regionSslPolicies.list": -type RegionSslCertificatesListCall struct { +type RegionSslPoliciesListCall struct { s *Service project string region string @@ -148797,13 +157218,13 @@ type RegionSslCertificatesListCall struct { header_ http.Header } -// List: Retrieves the list of SslCertificate resources available to the -// specified project in the specified region. +// List: Lists all the SSL policies that have been configured for the +// specified project and region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionSslCertificatesService) List(project string, region string) *RegionSslCertificatesListCall { - c := &RegionSslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionSslPoliciesService) List(project string, region string) *RegionSslPoliciesListCall { + c := &RegionSslPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -148844,7 +157265,7 @@ func (r *RegionSslCertificatesService) List(project string, region string) *Regi // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne // .*instance`. -func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) Filter(filter string) *RegionSslPoliciesListCall { c.urlParams_.Set("filter", filter) return c } @@ -148855,7 +157276,7 @@ func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertific // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. 
(Default: `500`) -func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) MaxResults(maxResults int64) *RegionSslPoliciesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -148869,7 +157290,7 @@ func (c *RegionSslCertificatesListCall) MaxResults(maxResults int64) *RegionSslC // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. -func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) OrderBy(orderBy string) *RegionSslPoliciesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -148877,7 +157298,7 @@ func (c *RegionSslCertificatesListCall) OrderBy(orderBy string) *RegionSslCertif // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set `pageToken` to the `nextPageToken` returned by a // previous list request to get the next page of results. -func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) PageToken(pageToken string) *RegionSslPoliciesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -148886,7 +157307,7 @@ func (c *RegionSslCertificatesListCall) PageToken(pageToken string) *RegionSslCe // "returnPartialSuccess": Opt-in for partial success behavior which // provides partial results in case of failure. The default value is // false. -func (c *RegionSslCertificatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslPoliciesListCall { c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } @@ -148894,7 +157315,7 @@ func (c *RegionSslCertificatesListCall) ReturnPartialSuccess(returnPartialSucces // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionSslCertificatesListCall) Fields(s ...googleapi.Field) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) Fields(s ...googleapi.Field) *RegionSslPoliciesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -148904,7 +157325,7 @@ func (c *RegionSslCertificatesListCall) Fields(s ...googleapi.Field) *RegionSslC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionSslCertificatesListCall) IfNoneMatch(entityTag string) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) IfNoneMatch(entityTag string) *RegionSslPoliciesListCall { c.ifNoneMatch_ = entityTag return c } @@ -148912,21 +157333,21 @@ func (c *RegionSslCertificatesListCall) IfNoneMatch(entityTag string) *RegionSsl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionSslCertificatesListCall) Context(ctx context.Context) *RegionSslCertificatesListCall { +func (c *RegionSslPoliciesListCall) Context(ctx context.Context) *RegionSslPoliciesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionSslCertificatesListCall) Header() http.Header { +func (c *RegionSslPoliciesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionSslPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -148939,7 +157360,7 @@ func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -148953,33 +157374,33 @@ func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionSslCertificates.list" call. -// Exactly one of *SslCertificateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SslCertificateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionSslPolicies.list" call. +// Exactly one of *SslPoliciesList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SslPoliciesList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { +func (c *RegionSslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &SslCertificateList{ + ret := &SslPoliciesList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -148991,10 +157412,10 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe } return ret, nil // { - // "description": "Retrieves the list of SslCertificate resources available to the specified project in the specified region.", - // "flatPath": "projects/{project}/regions/{region}/sslCertificates", + // "description": "Lists all the SSL policies that have been configured for the specified project and region.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies", // "httpMethod": "GET", - // "id": "compute.regionSslCertificates.list", + // "id": "compute.regionSslPolicies.list", // "parameterOrder": [ // "project", // "region" @@ -149043,9 +157464,9 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/sslCertificates", + // "path": "projects/{project}/regions/{region}/sslPolicies", // "response": { - // "$ref": "SslCertificateList" + // "$ref": "SslPoliciesList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -149059,7 +157480,7 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionSslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { +func (c *RegionSslPoliciesListCall) Pages(ctx context.Context, f func(*SslPoliciesList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -149077,6 +157498,468 @@ func (c *RegionSslCertificatesListCall) Pages(ctx context.Context, f func(*SslCe } } +// method id "compute.regionSslPolicies.listAvailableFeatures": + +type RegionSslPoliciesListAvailableFeaturesCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListAvailableFeatures: Lists all features that can be specified in +// the SSL policy when using custom profile. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionSslPoliciesService) ListAvailableFeatures(project string, region string) *RegionSslPoliciesListAvailableFeaturesCall { + c := &RegionSslPoliciesListAvailableFeaturesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. 
If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionSslPoliciesListAvailableFeaturesCall) Filter(filter string) *RegionSslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionSslPoliciesListAvailableFeaturesCall) MaxResults(maxResults int64) *RegionSslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. 
+func (c *RegionSslPoliciesListAvailableFeaturesCall) OrderBy(orderBy string) *RegionSslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionSslPoliciesListAvailableFeaturesCall) PageToken(pageToken string) *RegionSslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionSslPoliciesListAvailableFeaturesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionSslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSslPoliciesListAvailableFeaturesCall) Fields(s ...googleapi.Field) *RegionSslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionSslPoliciesListAvailableFeaturesCall) IfNoneMatch(entityTag string) *RegionSslPoliciesListAvailableFeaturesCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSslPoliciesListAvailableFeaturesCall) Context(ctx context.Context) *RegionSslPoliciesListAvailableFeaturesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSslPoliciesListAvailableFeaturesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSslPolicies.listAvailableFeatures" call. 
+// Exactly one of *SslPoliciesListAvailableFeaturesResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *SslPoliciesListAvailableFeaturesResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionSslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) (*SslPoliciesListAvailableFeaturesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SslPoliciesListAvailableFeaturesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures", + // "httpMethod": "GET", + // "id": "compute.regionSslPolicies.listAvailableFeatures", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/sslPolicies/listAvailableFeatures", + // "response": { + // "$ref": "SslPoliciesListAvailableFeaturesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionSslPolicies.patch": + +type RegionSslPoliciesPatchCall struct { + s *Service + project string + region string + sslPolicy string + sslpolicy *SslPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified SSL policy with the data included in the +// request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 +// characters long, and comply with RFC1035. +func (r *RegionSslPoliciesService) Patch(project string, region string, sslPolicy string, sslpolicy *SslPolicy) *RegionSslPoliciesPatchCall { + c := &RegionSslPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.sslPolicy = sslPolicy + c.sslpolicy = sslpolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionSslPoliciesPatchCall) RequestId(requestId string) *RegionSslPoliciesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSslPoliciesPatchCall) Fields(s ...googleapi.Field) *RegionSslPoliciesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSslPoliciesPatchCall) Context(ctx context.Context) *RegionSslPoliciesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionSslPoliciesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "sslPolicy": c.sslPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSslPolicies.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified SSL policy with the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", + // "httpMethod": "PATCH", + // "id": "compute.regionSslPolicies.patch", + // "parameterOrder": [ + // "project", + // "region", + // "sslPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sslPolicy": { + // "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/sslPolicies/{sslPolicy}", + // "request": { + // "$ref": "SslPolicy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regionTargetHttpProxies.delete": type RegionTargetHttpProxiesDeleteCall struct { @@ -149182,17 +158065,17 @@ func (c *RegionTargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -149269,8 +158152,7 @@ type RegionTargetHttpProxiesGetCall struct { } // Get: Returns the specified TargetHttpProxy resource in the specified -// region. Gets a list of available target HTTP proxies by making a -// list() request. +// region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. @@ -149360,17 +158242,17 @@ func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*Targ if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpProxy{ ServerResponse: googleapi.ServerResponse{ @@ -149384,7 +158266,7 @@ func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*Targ } return ret, nil // { - // "description": "Returns the specified TargetHttpProxy resource in the specified region. 
Gets a list of available target HTTP proxies by making a list() request.", + // "description": "Returns the specified TargetHttpProxy resource in the specified region.", // "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", // "httpMethod": "GET", // "id": "compute.regionTargetHttpProxies.get", @@ -149538,17 +158420,17 @@ func (c *RegionTargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -149788,17 +158670,17 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -150010,17 +158892,17 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -150191,17 +159073,17 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -150278,8 +159160,7 @@ type RegionTargetHttpsProxiesGetCall struct { } // Get: Returns the specified TargetHttpsProxy resource in the specified -// region. Gets a list of available target HTTP proxies by making a -// list() request. +// region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. @@ -150369,17 +159250,17 @@ func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*Tar if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpsProxy{ ServerResponse: googleapi.ServerResponse{ @@ -150393,7 +159274,7 @@ func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*Tar } return ret, nil // { - // "description": "Returns the specified TargetHttpsProxy resource in the specified region. 
Gets a list of available target HTTP proxies by making a list() request.", + // "description": "Returns the specified TargetHttpsProxy resource in the specified region.", // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", // "httpMethod": "GET", // "id": "compute.regionTargetHttpsProxies.get", @@ -150547,17 +159428,17 @@ func (c *RegionTargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -150797,17 +159678,17 @@ func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*Ta if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpsProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -151021,17 +159902,17 @@ func (c *RegionTargetHttpsProxiesPatchCall) Do(opts ...googleapi.CallOption) (*O if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -151112,10 +159993,10 @@ type RegionTargetHttpsProxiesSetSslCertificatesCall struct { // SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set an -// SslCertificates resource for. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set an +// SslCertificates resource for. 
func (r *RegionTargetHttpsProxiesService) SetSslCertificates(project string, region string, targetHttpsProxy string, regiontargethttpsproxiessetsslcertificatesrequest *RegionTargetHttpsProxiesSetSslCertificatesRequest) *RegionTargetHttpsProxiesSetSslCertificatesCall { c := &RegionTargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -151210,17 +160091,17 @@ func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.Ca if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -151263,18 +160144,385 @@ func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.Ca // "location": "query", // "type": "string" // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + // "request": { + // "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetHttpsProxies.setUrlMap": + +type RegionTargetHttpsProxiesSetUrlMapCall struct { + s *Service + project string + region string + targetHttpsProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetUrlMap: Changes the URL map for TargetHttpsProxy. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy to set a URL map +// for. +func (r *RegionTargetHttpsProxiesService) SetUrlMap(project string, region string, targetHttpsProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpsProxiesSetUrlMapCall { + c := &RegionTargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetHttpsProxy = targetHttpsProxy + c.urlmapreference = urlmapreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. 
The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionTargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetUrlMapCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetUrlMapCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetUrlMapCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetHttpsProxies.setUrlMap" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the URL map for TargetHttpsProxy.", + // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "httpMethod": "POST", + // "id": "compute.regionTargetHttpsProxies.setUrlMap", + // "parameterOrder": [ + // "project", + // "region", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy to set a URL map for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionTargetTcpProxies.delete": + +type RegionTargetTcpProxiesDeleteCall struct { + s *Service + project string + region string + targetTcpProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified TargetTcpProxy resource. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetTcpProxy: Name of the TargetTcpProxy resource to delete. 
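
This regeneration adds a regional TargetTcpProxies service (the compute.regionTargetTcpProxies.* methods below). A minimal caller-side sketch of the new Insert and Get calls, assuming Application Default Credentials; the project, region, proxy name, and backend service URL are placeholders, and real code would wait for the insert Operation to reach DONE before reading the proxy back:

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService uses Application Default Credentials unless options are supplied.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder names and backend service URL.
	proxy := &compute.TargetTcpProxy{
		Name:    "example-tcp-proxy",
		Service: "projects/my-project/regions/us-east1/backendServices/example-backend",
	}
	op, err := svc.RegionTargetTcpProxies.Insert("my-project", "us-east1", proxy).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("insert operation %s: %s", op.Name, op.Status)

	// In real code, poll the operation until it is DONE before this Get.
	got, err := svc.RegionTargetTcpProxies.Get("my-project", "us-east1", "example-tcp-proxy").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("proxy %s forwards to %s", got.Name, got.Service)
}
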
+func (r *RegionTargetTcpProxiesService) Delete(project string, region string, targetTcpProxy string) *RegionTargetTcpProxiesDeleteCall { + c := &RegionTargetTcpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetTcpProxy = targetTcpProxy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionTargetTcpProxiesDeleteCall) RequestId(requestId string) *RegionTargetTcpProxiesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetTcpProxiesDeleteCall) Fields(s ...googleapi.Field) *RegionTargetTcpProxiesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetTcpProxiesDeleteCall) Context(ctx context.Context) *RegionTargetTcpProxiesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetTcpProxiesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetTcpProxy": c.targetTcpProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetTcpProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
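
Throughout this regenerated file, Do now routes errors through gensupport.WrapError instead of returning bare *googleapi.Error values. A minimal sketch of the caller-side effect, assuming Application Default Credentials and placeholder resource names; errors.As still recovers the *googleapi.Error with its HTTP status code and headers:

package main

import (
	"context"
	"errors"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder names; deleting a proxy that does not exist exercises the error path.
	_, err = svc.RegionTargetTcpProxies.Delete("my-project", "us-east1", "no-such-proxy").Context(ctx).Do()

	var apiErr *googleapi.Error
	switch {
	case err == nil:
		log.Println("delete accepted")
	case errors.As(err, &apiErr):
		// The wrapped error still exposes the HTTP status code and message.
		log.Printf("API error: HTTP %d: %s", apiErr.Code, apiErr.Message)
	default:
		log.Printf("non-API error (transport, context, etc.): %v", err)
	}
}
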
+func (c *RegionTargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified TargetTcpProxy resource.", + // "flatPath": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + // "httpMethod": "DELETE", + // "id": "compute.regionTargetTcpProxies.delete", + // "parameterOrder": [ + // "project", + // "region", + // "targetTcpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetTcpProxy": { + // "description": "Name of the TargetTcpProxy resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", - // "request": { - // "$ref": "RegionTargetHttpsProxiesSetSslCertificatesRequest" - // }, + // "path": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", // "response": { // "$ref": "Operation" // }, @@ -151286,31 +160534,200 @@ func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.Ca } -// method id "compute.regionTargetHttpsProxies.setUrlMap": +// method id "compute.regionTargetTcpProxies.get": -type RegionTargetHttpsProxiesSetUrlMapCall struct { - s *Service - project string - region string - targetHttpsProxy string - urlmapreference *UrlMapReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionTargetTcpProxiesGetCall struct { + s *Service + project string + region string + targetTcpProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetUrlMap: Changes the URL map for TargetHttpsProxy. +// Get: Returns the specified TargetTcpProxy resource. // // - project: Project ID for this request. // - region: Name of the region scoping this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy to set a URL map -// for. -func (r *RegionTargetHttpsProxiesService) SetUrlMap(project string, region string, targetHttpsProxy string, urlmapreference *UrlMapReference) *RegionTargetHttpsProxiesSetUrlMapCall { - c := &RegionTargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - targetTcpProxy: Name of the TargetTcpProxy resource to return. +func (r *RegionTargetTcpProxiesService) Get(project string, region string, targetTcpProxy string) *RegionTargetTcpProxiesGetCall { + c := &RegionTargetTcpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.targetHttpsProxy = targetHttpsProxy - c.urlmapreference = urlmapreference + c.targetTcpProxy = targetTcpProxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetTcpProxiesGetCall) Fields(s ...googleapi.Field) *RegionTargetTcpProxiesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionTargetTcpProxiesGetCall) IfNoneMatch(entityTag string) *RegionTargetTcpProxiesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *RegionTargetTcpProxiesGetCall) Context(ctx context.Context) *RegionTargetTcpProxiesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetTcpProxiesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetTcpProxy": c.targetTcpProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetTcpProxies.get" call. +// Exactly one of *TargetTcpProxy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetTcpProxy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionTargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TargetTcpProxy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified TargetTcpProxy resource.", + // "flatPath": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + // "httpMethod": "GET", + // "id": "compute.regionTargetTcpProxies.get", + // "parameterOrder": [ + // "project", + // "region", + // "targetTcpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetTcpProxy": { + // "description": "Name of the TargetTcpProxy resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetTcpProxies/{targetTcpProxy}", + // "response": { + // "$ref": "TargetTcpProxy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionTargetTcpProxies.insert": + +type RegionTargetTcpProxiesInsertCall struct { + s *Service + project string + region string + targettcpproxy *TargetTcpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a TargetTcpProxy resource in the specified project +// and region using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionTargetTcpProxiesService) Insert(project string, region string, targettcpproxy *TargetTcpProxy) *RegionTargetTcpProxiesInsertCall { + c := &RegionTargetTcpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targettcpproxy = targettcpproxy return c } @@ -151325,7 +160742,7 @@ func (r *RegionTargetHttpsProxiesService) SetUrlMap(project string, region strin // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
-func (c *RegionTargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *RegionTargetHttpsProxiesSetUrlMapCall { +func (c *RegionTargetTcpProxiesInsertCall) RequestId(requestId string) *RegionTargetTcpProxiesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -151333,7 +160750,7 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *RegionTargetHttpsProxiesSetUrlMapCall { +func (c *RegionTargetTcpProxiesInsertCall) Fields(s ...googleapi.Field) *RegionTargetTcpProxiesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -151341,21 +160758,21 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *RegionTargetHttpsProxiesSetUrlMapCall { +func (c *RegionTargetTcpProxiesInsertCall) Context(ctx context.Context) *RegionTargetTcpProxiesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { +func (c *RegionTargetTcpProxiesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionTargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -151363,14 +160780,14 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targettcpproxy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetTcpProxies") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -151378,38 +160795,37 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Res } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetHttpsProxy": c.targetHttpsProxy, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionTargetHttpsProxies.setUrlMap" call. +// Do executes the "compute.regionTargetTcpProxies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionTargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -151423,14 +160839,13 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the URL map for TargetHttpsProxy.", - // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "description": "Creates a TargetTcpProxy resource in the specified project and region using the data included in the request.", + // "flatPath": "projects/{project}/regions/{region}/targetTcpProxies", // "httpMethod": "POST", - // "id": "compute.regionTargetHttpsProxies.setUrlMap", + // "id": "compute.regionTargetTcpProxies.insert", // "parameterOrder": [ // "project", - // "region", - // "targetHttpsProxy" + // "region" // ], // "parameters": { // "project": { @@ -151451,18 +160866,11 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "targetHttpsProxy": { - // "description": "Name of the TargetHttpsProxy to set a URL map for.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "path": "projects/{project}/regions/{region}/targetTcpProxies", // "request": { - // "$ref": "UrlMapReference" + // "$ref": "TargetTcpProxy" // }, // "response": { // "$ref": "Operation" @@ -151475,6 +160883,298 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) } +// method id "compute.regionTargetTcpProxies.list": + +type RegionTargetTcpProxiesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of TargetTcpProxy resources available to the +// specified project in a given region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionTargetTcpProxiesService) List(project string, region string) *RegionTargetTcpProxiesListCall { + c := &RegionTargetTcpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionTargetTcpProxiesListCall) Filter(filter string) *RegionTargetTcpProxiesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionTargetTcpProxiesListCall) MaxResults(maxResults int64) *RegionTargetTcpProxiesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionTargetTcpProxiesListCall) OrderBy(orderBy string) *RegionTargetTcpProxiesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionTargetTcpProxiesListCall) PageToken(pageToken string) *RegionTargetTcpProxiesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionTargetTcpProxiesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionTargetTcpProxiesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionTargetTcpProxiesListCall) Fields(s ...googleapi.Field) *RegionTargetTcpProxiesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
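
The new list call supports the filter, maxResults, and pageToken parameters documented above, plus a Pages helper that iterates every page. A minimal sketch, assuming a placeholder project and region and a filter in the style of the generated documentation:

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder project and region; Pages follows nextPageToken automatically.
	err = svc.RegionTargetTcpProxies.List("my-project", "us-east1").
		Filter("name != example-proxy").
		MaxResults(100).
		Pages(ctx, func(page *compute.TargetTcpProxyList) error {
			for _, proxy := range page.Items {
				log.Printf("%s -> %s", proxy.Name, proxy.Service)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}
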
+func (c *RegionTargetTcpProxiesListCall) IfNoneMatch(entityTag string) *RegionTargetTcpProxiesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionTargetTcpProxiesListCall) Context(ctx context.Context) *RegionTargetTcpProxiesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionTargetTcpProxiesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionTargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetTcpProxies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionTargetTcpProxies.list" call. +// Exactly one of *TargetTcpProxyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetTcpProxyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionTargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxyList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TargetTcpProxyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of TargetTcpProxy resources available to the specified project in a given region.", + // "flatPath": "projects/{project}/regions/{region}/targetTcpProxies", + // "httpMethod": "GET", + // "id": "compute.regionTargetTcpProxies.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetTcpProxies", + // "response": { + // "$ref": "TargetTcpProxyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionTargetTcpProxiesListCall) Pages(ctx context.Context, f func(*TargetTcpProxyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.regionUrlMaps.delete": type RegionUrlMapsDeleteCall struct { @@ -151571,17 +161271,17 @@ func (c *RegionUrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -151657,8 +161357,7 @@ type RegionUrlMapsGetCall struct { header_ http.Header } -// Get: Returns the specified UrlMap resource. Gets a list of available -// URL maps by making a list() request. +// Get: Returns the specified UrlMap resource. // // - project: Project ID for this request. // - region: Name of the region scoping this request. @@ -151748,17 +161447,17 @@ func (c *RegionUrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMap{ ServerResponse: googleapi.ServerResponse{ @@ -151772,7 +161471,7 @@ func (c *RegionUrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) } return ret, nil // { - // "description": "Returns the specified UrlMap resource. 
Gets a list of available URL maps by making a list() request.", + // "description": "Returns the specified UrlMap resource.", // "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", // "httpMethod": "GET", // "id": "compute.regionUrlMaps.get", @@ -151917,17 +161616,17 @@ func (c *RegionUrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -152167,17 +161866,17 @@ func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapList{ ServerResponse: googleapi.ServerResponse{ @@ -152382,17 +162081,17 @@ func (c *RegionUrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -152562,17 +162261,17 @@ func (c *RegionUrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -152736,17 +162435,17 @@ func (c *RegionUrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsVa if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapsValidateResponse{ ServerResponse: googleapi.ServerResponse{ @@ -152819,10 +162518,9 @@ type RegionsGetCall struct { header_ http.Header } -// Get: Returns the specified Region resource. Gets a list of available -// regions by making a list() request. To decrease latency for this -// method, you can optionally omit any unneeded information from the -// response by using a field mask. This practice is especially +// Get: Returns the specified Region resource. To decrease latency for +// this method, you can optionally omit any unneeded information from +// the response by using a field mask. This practice is especially // recommended for unused quota information (the `quotas` field). 
To // exclude one or more fields, set your request's `fields` query // parameter to only include the fields you need. For example, to only @@ -152914,17 +162612,17 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Region{ ServerResponse: googleapi.ServerResponse{ @@ -152938,7 +162636,7 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { } return ret, nil // { - // "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", + // "description": "Returns the specified Region resource. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", // "flatPath": "projects/{project}/regions/{region}", // "httpMethod": "GET", // "id": "compute.regions.get", @@ -153160,17 +162858,17 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RegionList{ ServerResponse: googleapi.ServerResponse{ @@ -153452,17 +163150,17 @@ func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Rese if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ReservationAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -153664,17 +163362,17 @@ func (c *ReservationsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: 
googleapi.ServerResponse{ @@ -153840,17 +163538,17 @@ func (c *ReservationsGetCall) Do(opts ...googleapi.CallOption) (*Reservation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Reservation{ ServerResponse: googleapi.ServerResponse{ @@ -154020,17 +163718,17 @@ func (c *ReservationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -154204,17 +163902,17 @@ func (c *ReservationsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -154454,17 +164152,17 @@ func (c *ReservationsListCall) Do(opts ...googleapi.CallOption) (*ReservationLis if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ReservationList{ ServerResponse: googleapi.ServerResponse{ @@ -154678,17 +164376,17 @@ func (c *ReservationsResizeCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -154851,17 +164549,17 @@ func (c *ReservationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -155019,17 +164717,17 @@ func (c *ReservationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := 
googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -155216,17 +164914,17 @@ func (c *ReservationsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -155493,17 +165191,17 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResourcePolicyAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -155705,17 +165403,17 @@ func (c *ResourcePoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -155881,17 +165579,17 @@ func (c *ResourcePoliciesGetCall) Do(opts ...googleapi.CallOption) (*ResourcePol if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResourcePolicy{ ServerResponse: googleapi.ServerResponse{ @@ -156061,17 +165759,17 @@ func (c *ResourcePoliciesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Po if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -156244,17 +165942,17 @@ func (c *ResourcePoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -156494,17 +166192,17 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, 
gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResourcePolicyList{ ServerResponse: googleapi.ServerResponse{ @@ -156701,17 +166399,17 @@ func (c *ResourcePoliciesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Po if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -156869,17 +166567,17 @@ func (c *ResourcePoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -157131,17 +166829,17 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RouterAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -157343,17 +167041,17 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -157429,8 +167127,7 @@ type RoutersGetCall struct { header_ http.Header } -// Get: Returns the specified Router resource. Gets a list of available -// routers by making a list() request. +// Get: Returns the specified Router resource. // // - project: Project ID for this request. // - region: Name of the region for this request. @@ -157520,17 +167217,17 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Router{ ServerResponse: googleapi.ServerResponse{ @@ -157544,7 +167241,7 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { } return ret, nil // { - // "description": "Returns the specified Router resource. 
Gets a list of available routers by making a list() request.", + // "description": "Returns the specified Router resource.", // "flatPath": "projects/{project}/regions/{region}/routers/{router}", // "httpMethod": "GET", // "id": "compute.routers.get", @@ -157605,10 +167302,10 @@ type RoutersGetNatMappingInfoCall struct { // GetNatMappingInfo: Retrieves runtime Nat mapping information of VM // endpoints. // -// - project: Project ID for this request. -// - region: Name of the region for this request. -// - router: Name of the Router resource to query for Nat Mapping -// information of VM endpoints. +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - router: Name of the Router resource to query for Nat Mapping +// information of VM endpoints. func (r *RoutersService) GetNatMappingInfo(project string, region string, router string) *RoutersGetNatMappingInfoCall { c := &RoutersGetNatMappingInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -157776,17 +167473,17 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VmEndpointNatMappingsList{ ServerResponse: googleapi.ServerResponse{ @@ -157998,17 +167695,17 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RouterStatusResponse{ ServerResponse: googleapi.ServerResponse{ @@ -158176,17 +167873,17 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -158426,17 +168123,17 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RouterList{ ServerResponse: googleapi.ServerResponse{ @@ -158650,17 +168347,17 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != 
nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -158824,17 +168521,17 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RoutersPreviewResponse{ ServerResponse: googleapi.ServerResponse{ @@ -159012,17 +168709,17 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -159189,17 +168886,17 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -159266,8 +168963,7 @@ type RoutesGetCall struct { header_ http.Header } -// Get: Returns the specified Route resource. Gets a list of available -// routes by making a list() request. +// Get: Returns the specified Route resource. // // - project: Project ID for this request. // - route: Name of the Route resource to return. @@ -159354,17 +169050,17 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Route{ ServerResponse: googleapi.ServerResponse{ @@ -159378,7 +169074,7 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { } return ret, nil // { - // "description": "Returns the specified Route resource. 
Gets a list of available routes by making a list() request.", + // "description": "Returns the specified Route resource.", // "flatPath": "projects/{project}/global/routes/{route}", // "httpMethod": "GET", // "id": "compute.routes.get", @@ -159520,17 +169216,17 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -159758,17 +169454,17 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &RouteList{ ServerResponse: googleapi.ServerResponse{ @@ -159959,17 +169655,17 @@ func (c *SecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -160218,17 +169914,17 @@ func (c *SecurityPoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SecurityPoliciesAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -160426,17 +170122,17 @@ func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -160591,17 +170287,17 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SecurityPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -160666,9 +170362,9 @@ type SecurityPoliciesGetRuleCall struct { // GetRule: Gets a rule at the specified priority. 
// -// - project: Project ID for this request. -// - securityPolicy: Name of the security policy to which the queried -// rule belongs. +// - project: Project ID for this request. +// - securityPolicy: Name of the security policy to which the queried +// rule belongs. func (r *SecurityPoliciesService) GetRule(project string, securityPolicy string) *SecurityPoliciesGetRuleCall { c := &SecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -160759,17 +170455,17 @@ func (c *SecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*Securit if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SecurityPolicyRule{ ServerResponse: googleapi.ServerResponse{ @@ -160938,17 +170634,17 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -161181,17 +170877,17 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SecurityPolicyList{ ServerResponse: googleapi.ServerResponse{ @@ -161464,17 +171160,17 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googlea if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SecurityPoliciesListPreconfiguredExpressionSetsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -161558,9 +171254,10 @@ type SecurityPoliciesPatchCall struct { } // Patch: Patches the specified policy with the data included in the -// request. This cannot be used to be update the rules in the policy. -// Please use the per rule methods like addRule, patchRule, and -// removeRule instead. +// request. To clear fields in the rule, leave the fields empty and +// specify them in the updateMask. This cannot be used to be update the +// rules in the policy. Please use the per rule methods like addRule, +// patchRule, and removeRule instead. // // - project: Project ID for this request. // - securityPolicy: Name of the security policy to update. 
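A minimal usage sketch for the error-wrapping change above: Do methods now pass their errors through gensupport.WrapError, but callers can still match *googleapi.Error; errors.As is the robust way to do so. This assumes an already-configured *compute.Service (svc) and placeholder project and policy names.

package example

import (
	"context"
	"errors"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func patchPolicyDescription(ctx context.Context, svc *compute.Service) error {
	// Placeholder project/policy names; the Description value is arbitrary.
	policy := &compute.SecurityPolicy{Description: "updated by automation"}
	op, err := svc.SecurityPolicies.Patch("my-project", "my-policy", policy).Context(ctx).Do()
	if err != nil {
		// Errors wrapped by gensupport.WrapError still unwrap to *googleapi.Error,
		// so existing status-code handling continues to work via errors.As.
		var gerr *googleapi.Error
		if errors.As(err, &gerr) {
			log.Printf("patch failed: HTTP %d: %s", gerr.Code, gerr.Message)
		}
		return err
	}
	log.Printf("started operation %s", op.Name)
	return nil
}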
@@ -161656,17 +171353,17 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -161680,7 +171377,7 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Patches the specified policy with the data included in the request. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", + // "description": "Patches the specified policy with the data included in the request. To clear fields in the rule, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, and removeRule instead.", // "flatPath": "projects/{project}/global/securityPolicies/{securityPolicy}", // "httpMethod": "PATCH", // "id": "compute.securityPolicies.patch", @@ -161830,17 +171527,17 @@ func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -161996,17 +171693,17 @@ func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -162062,6 +171759,162 @@ func (c *SecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Oper } +// method id "compute.securityPolicies.setLabels": + +type SecurityPoliciesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a security policy. To learn more about +// labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *SecurityPoliciesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *SecurityPoliciesSetLabelsCall { + c := &SecurityPoliciesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *SecurityPoliciesSetLabelsCall) Fields(s ...googleapi.Field) *SecurityPoliciesSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesSetLabelsCall) Context(ctx context.Context) *SecurityPoliciesSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SecurityPoliciesSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/securityPolicies/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SecurityPoliciesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a security policy. 
To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/global/securityPolicies/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.securityPolicies.setLabels", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/securityPolicies/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.serviceAttachments.aggregatedList": type ServiceAttachmentsAggregatedListCall struct { @@ -162253,17 +172106,17 @@ func (c *ServiceAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -162374,10 +172227,10 @@ type ServiceAttachmentsDeleteCall struct { // Delete: Deletes the specified ServiceAttachment in the given scope // -// - project: Project ID for this request. -// - region: Name of the region of this request. -// - serviceAttachment: Name of the ServiceAttachment resource to -// delete. +// - project: Project ID for this request. +// - region: Name of the region of this request. +// - serviceAttachment: Name of the ServiceAttachment resource to +// delete. func (r *ServiceAttachmentsService) Delete(project string, region string, serviceAttachment string) *ServiceAttachmentsDeleteCall { c := &ServiceAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -162466,17 +172319,17 @@ func (c *ServiceAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -162555,10 +172408,10 @@ type ServiceAttachmentsGetCall struct { // Get: Returns the specified ServiceAttachment resource in the given // scope. // -// - project: Project ID for this request. -// - region: Name of the region of this request. -// - serviceAttachment: Name of the ServiceAttachment resource to -// return. +// - project: Project ID for this request. +// - region: Name of the region of this request. 
+// - serviceAttachment: Name of the ServiceAttachment resource to +// return. func (r *ServiceAttachmentsService) Get(project string, region string, serviceAttachment string) *ServiceAttachmentsGetCall { c := &ServiceAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -162644,17 +172497,17 @@ func (c *ServiceAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*ServiceAt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceAttachment{ ServerResponse: googleapi.ServerResponse{ @@ -162824,17 +172677,17 @@ func (c *ServiceAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -163008,17 +172861,17 @@ func (c *ServiceAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -163257,17 +173110,17 @@ func (c *ServiceAttachmentsListCall) Do(opts ...googleapi.CallOption) (*ServiceA if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceAttachmentList{ ServerResponse: googleapi.ServerResponse{ @@ -163384,12 +173237,12 @@ type ServiceAttachmentsPatchCall struct { // included in the request. This method supports PATCH semantics and // uses JSON merge patch format and processing rules. // -// - project: Project ID for this request. -// - region: The region scoping this request and should conform to -// RFC1035. -// - serviceAttachment: The resource id of the ServiceAttachment to -// patch. It should conform to RFC1035 resource name or be a string -// form on an unsigned long number. +// - project: Project ID for this request. +// - region: The region scoping this request and should conform to +// RFC1035. +// - serviceAttachment: The resource id of the ServiceAttachment to +// patch. It should conform to RFC1035 resource name or be a string +// form on an unsigned long number. 
func (r *ServiceAttachmentsService) Patch(project string, region string, serviceAttachment string, serviceattachment *ServiceAttachment) *ServiceAttachmentsPatchCall { c := &ServiceAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -163484,17 +173337,17 @@ func (c *ServiceAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -163655,17 +173508,17 @@ func (c *ServiceAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -163823,17 +173676,17 @@ func (c *ServiceAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -164001,17 +173854,17 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -164078,8 +173931,7 @@ type SnapshotsGetCall struct { header_ http.Header } -// Get: Returns the specified Snapshot resource. Gets a list of -// available snapshots by making a list() request. +// Get: Returns the specified Snapshot resource. // // - project: Project ID for this request. // - snapshot: Name of the Snapshot resource to return. @@ -164166,17 +174018,17 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Snapshot{ ServerResponse: googleapi.ServerResponse{ @@ -164190,7 +174042,7 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { } return ret, nil // { - // "description": "Returns the specified Snapshot resource. 
Gets a list of available snapshots by making a list() request.", + // "description": "Returns the specified Snapshot resource.", // "flatPath": "projects/{project}/global/snapshots/{snapshot}", // "httpMethod": "GET", // "id": "compute.snapshots.get", @@ -164334,17 +174186,17 @@ func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -164509,17 +174361,17 @@ func (c *SnapshotsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -164747,17 +174599,17 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SnapshotList{ ServerResponse: googleapi.ServerResponse{ @@ -164942,17 +174794,17 @@ func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -165098,17 +174950,17 @@ func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -165254,17 +175106,17 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -165509,17 +175361,17 @@ func (c *SslCertificatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*S if res.Body != nil { 
res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslCertificateAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -165717,17 +175569,17 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -165794,8 +175646,7 @@ type SslCertificatesGetCall struct { header_ http.Header } -// Get: Returns the specified SslCertificate resource. Gets a list of -// available SSL certificates by making a list() request. +// Get: Returns the specified SslCertificate resource. // // - project: Project ID for this request. // - sslCertificate: Name of the SslCertificate resource to return. @@ -165882,17 +175733,17 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslCertificate{ ServerResponse: googleapi.ServerResponse{ @@ -165906,7 +175757,7 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica } return ret, nil // { - // "description": "Returns the specified SslCertificate resource. 
Gets a list of available SSL certificates by making a list() request.", + // "description": "Returns the specified SslCertificate resource.", // "flatPath": "projects/{project}/global/sslCertificates/{sslCertificate}", // "httpMethod": "GET", // "id": "compute.sslCertificates.get", @@ -166048,17 +175899,17 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -166286,17 +176137,17 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslCertificateList{ ServerResponse: googleapi.ServerResponse{ @@ -166388,6 +176239,304 @@ func (c *SslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertific } } +// method id "compute.sslPolicies.aggregatedList": + +type SslPoliciesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all SslPolicy resources, +// regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. +func (r *SslPoliciesService) AggregatedList(project string) *SslPoliciesAggregatedListCall { + c := &SslPoliciesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. 
For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *SslPoliciesAggregatedListCall) Filter(filter string) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *SslPoliciesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *SslPoliciesAggregatedListCall) MaxResults(maxResults int64) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *SslPoliciesAggregatedListCall) OrderBy(orderBy string) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
+func (c *SslPoliciesAggregatedListCall) PageToken(pageToken string) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *SslPoliciesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslPoliciesAggregatedListCall) Fields(s ...googleapi.Field) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SslPoliciesAggregatedListCall) IfNoneMatch(entityTag string) *SslPoliciesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslPoliciesAggregatedListCall) Context(ctx context.Context) *SslPoliciesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SslPoliciesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslPoliciesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/sslPolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslPolicies.aggregatedList" call. +// Exactly one of *SslPoliciesAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *SslPoliciesAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslPoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SslPoliciesAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all SslPolicy resources, regional and global, available to the specified project.", + // "flatPath": "projects/{project}/aggregated/sslPolicies", + // "httpMethod": "GET", + // "id": "compute.sslPolicies.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/sslPolicies", + // "response": { + // "$ref": "SslPoliciesAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *SslPoliciesAggregatedListCall) Pages(ctx context.Context, f func(*SslPoliciesAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.sslPolicies.delete": type SslPoliciesDeleteCall struct { @@ -166403,9 +176552,9 @@ type SslPoliciesDeleteCall struct { // be deleted only if it is not in use by any TargetHttpsProxy or // TargetSslProxy resources. // -// - project: Project ID for this request. -// - sslPolicy: Name of the SSL policy to delete. The name must be 1-63 -// characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - sslPolicy: Name of the SSL policy to delete. The name must be 1-63 +// characters long, and comply with RFC1035. func (r *SslPoliciesService) Delete(project string, sslPolicy string) *SslPoliciesDeleteCall { c := &SslPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -166492,17 +176641,17 @@ func (c *SslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -166571,9 +176720,9 @@ type SslPoliciesGetCall struct { // Get: Lists all of the ordered rules present in a single specified // policy. // -// - project: Project ID for this request. -// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 -// characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 +// characters long, and comply with RFC1035. func (r *SslPoliciesService) Get(project string, sslPolicy string) *SslPoliciesGetCall { c := &SslPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -166657,17 +176806,17 @@ func (c *SslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslPolicy{ ServerResponse: googleapi.ServerResponse{ @@ -166728,8 +176877,7 @@ type SslPoliciesInsertCall struct { header_ http.Header } -// Insert: Returns the specified SSL policy resource. Gets a list of -// available SSL policies by making a list() request. +// Insert: Returns the specified SSL policy resource. // // - project: Project ID for this request. 
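// Illustrative sketch (not part of the vendored file): how a caller might drive the
// compute.sslPolicies.aggregatedList method added by this bump, combining an
// AIP-160 filter with page-wise iteration. The *compute.Service value svc, the
// project string, and ctx are hypothetical placeholders; assumed imports are
// "context", "fmt", and "google.golang.org/api/compute/v1".
func listAllSslPolicies(ctx context.Context, svc *compute.Service, project string) error {
	// Build the call with an optional filter and a smaller page size, then walk
	// every page; returning a non-nil error from the callback stops the iteration.
	call := svc.SslPolicies.AggregatedList(project).
		Filter(`name != "example-policy"`).
		MaxResults(100).
		ReturnPartialSuccess(true)
	return call.Pages(ctx, func(page *compute.SslPoliciesAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, policy := range scoped.SslPolicies {
				fmt.Printf("%s: %s\n", scope, policy.Name)
			}
		}
		return nil
	})
}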
func (r *SslPoliciesService) Insert(project string, sslpolicy *SslPolicy) *SslPoliciesInsertCall { @@ -166822,17 +176970,17 @@ func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -166846,7 +176994,7 @@ func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Returns the specified SSL policy resource. Gets a list of available SSL policies by making a list() request.", + // "description": "Returns the specified SSL policy resource.", // "flatPath": "projects/{project}/global/sslPolicies", // "httpMethod": "POST", // "id": "compute.sslPolicies.insert", @@ -167060,17 +177208,17 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslPoliciesList{ ServerResponse: googleapi.ServerResponse{ @@ -167342,17 +177490,17 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SslPoliciesListAvailableFeaturesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -167438,9 +177586,9 @@ type SslPoliciesPatchCall struct { // Patch: Patches the specified SSL policy with the data included in the // request. // -// - project: Project ID for this request. -// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 -// characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - sslPolicy: Name of the SSL policy to update. The name must be 1-63 +// characters long, and comply with RFC1035. 
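// Illustrative sketch (not part of the vendored file): patching an existing SSL
// policy through this service. svc, project, ctx and the policy name
// "example-ssl-policy" are hypothetical placeholders; MinTlsVersion and Profile
// are existing fields on compute.SslPolicy.
func tightenSslPolicy(ctx context.Context, svc *compute.Service, project string) error {
	// Only non-zero fields are marshaled (unless listed in ForceSendFields), so
	// the policy keeps its other settings.
	policy := &compute.SslPolicy{
		MinTlsVersion: "TLS_1_2",
		Profile:       "MODERN",
	}
	op, err := svc.SslPolicies.Patch(project, "example-ssl-policy", policy).Context(ctx).Do()
	if err != nil {
		return err
	}
	// op is a *compute.Operation; callers typically poll it until Status == "DONE".
	_ = op
	return nil
}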
func (r *SslPoliciesService) Patch(project string, sslPolicy string, sslpolicy *SslPolicy) *SslPoliciesPatchCall { c := &SslPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -167533,17 +177681,17 @@ func (c *SslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -167790,17 +177938,17 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SubnetworkAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -168002,17 +178150,17 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -168188,17 +178336,17 @@ func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -168277,8 +178425,7 @@ type SubnetworksGetCall struct { header_ http.Header } -// Get: Returns the specified subnetwork. Gets a list of available -// subnetworks list() request. +// Get: Returns the specified subnetwork. // // - project: Project ID for this request. // - region: Name of the region scoping this request. @@ -168368,17 +178515,17 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Subnetwork{ ServerResponse: googleapi.ServerResponse{ @@ -168392,7 +178539,7 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro } return ret, nil // { - // "description": "Returns the specified subnetwork. 
Gets a list of available subnetworks list() request.", + // "description": "Returns the specified subnetwork.", // "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", // "httpMethod": "GET", // "id": "compute.subnetworks.get", @@ -168548,17 +178695,17 @@ func (c *SubnetworksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -168732,17 +178879,17 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -168982,17 +179129,17 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &SubnetworkList{ ServerResponse: googleapi.ServerResponse{ @@ -169270,17 +179417,17 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UsableSubnetworksAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -169502,17 +179649,17 @@ func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -169681,17 +179828,17 @@ func (c *SubnetworksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -169866,17 +180013,17 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio if res.Body != 
nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -170039,17 +180186,17 @@ func (c *SubnetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -170212,17 +180359,17 @@ func (c *TargetGrpcProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -170377,17 +180524,17 @@ func (c *TargetGrpcProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetGrpc if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetGrpcProxy{ ServerResponse: googleapi.ServerResponse{ @@ -170543,17 +180690,17 @@ func (c *TargetGrpcProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -170780,17 +180927,17 @@ func (c *TargetGrpcProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetGrp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetGrpcProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -170992,17 +181139,17 @@ func (c *TargetGrpcProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ 
ServerResponse: googleapi.ServerResponse{ @@ -171251,17 +181398,17 @@ func (c *TargetHttpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpProxyAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -171459,17 +181606,17 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -171536,8 +181683,7 @@ type TargetHttpProxiesGetCall struct { header_ http.Header } -// Get: Returns the specified TargetHttpProxy resource. Gets a list of -// available target HTTP proxies by making a list() request. +// Get: Returns the specified TargetHttpProxy resource. // // - project: Project ID for this request. // - targetHttpProxy: Name of the TargetHttpProxy resource to return. @@ -171624,17 +181770,17 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpProxy{ ServerResponse: googleapi.ServerResponse{ @@ -171648,7 +181794,7 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp } return ret, nil // { - // "description": "Returns the specified TargetHttpProxy resource. 
Gets a list of available target HTTP proxies by making a list() request.", + // "description": "Returns the specified TargetHttpProxy resource.", // "flatPath": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", // "httpMethod": "GET", // "id": "compute.targetHttpProxies.get", @@ -171790,17 +181936,17 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -172028,17 +182174,17 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -172240,17 +182386,17 @@ func (c *TargetHttpProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -172416,17 +182562,17 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -172675,17 +182821,17 @@ func (c *TargetHttpsProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpsProxyAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -172883,17 +183029,17 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -172960,8 +183106,7 @@ type TargetHttpsProxiesGetCall struct { header_ 
http.Header } -// Get: Returns the specified TargetHttpsProxy resource. Gets a list of -// available target HTTPS proxies by making a list() request. +// Get: Returns the specified TargetHttpsProxy resource. // // - project: Project ID for this request. // - targetHttpsProxy: Name of the TargetHttpsProxy resource to return. @@ -173048,17 +183193,17 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpsProxy{ ServerResponse: googleapi.ServerResponse{ @@ -173072,7 +183217,7 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt } return ret, nil // { - // "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request.", + // "description": "Returns the specified TargetHttpsProxy resource.", // "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", // "httpMethod": "GET", // "id": "compute.targetHttpsProxies.get", @@ -173214,17 +183359,17 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -173452,17 +183597,17 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetHttpsProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -173664,17 +183809,17 @@ func (c *TargetHttpsProxiesPatchCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -173746,10 +183891,10 @@ type TargetHttpsProxiesSetCertificateMapCall struct { // SetCertificateMap: Changes the Certificate Map for TargetHttpsProxy. // -// - project: Project ID for this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose -// CertificateMap is to be set. The name must be 1-63 characters long, -// and comply with RFC1035. +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose +// CertificateMap is to be set. The name must be 1-63 characters long, +// and comply with RFC1035. 
func (r *TargetHttpsProxiesService) SetCertificateMap(project string, targetHttpsProxy string, targethttpsproxiessetcertificatemaprequest *TargetHttpsProxiesSetCertificateMapRequest) *TargetHttpsProxiesSetCertificateMapCall { c := &TargetHttpsProxiesSetCertificateMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -173842,17 +183987,17 @@ func (c *TargetHttpsProxiesSetCertificateMapCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -173923,9 +184068,9 @@ type TargetHttpsProxiesSetQuicOverrideCall struct { // SetQuicOverride: Sets the QUIC override policy for TargetHttpsProxy. // -// - project: Project ID for this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set the -// QUIC override policy for. The name should conform to RFC1035. +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set the +// QUIC override policy for. The name should conform to RFC1035. func (r *TargetHttpsProxiesService) SetQuicOverride(project string, targetHttpsProxy string, targethttpsproxiessetquicoverriderequest *TargetHttpsProxiesSetQuicOverrideRequest) *TargetHttpsProxiesSetQuicOverrideCall { c := &TargetHttpsProxiesSetQuicOverrideCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -174018,17 +184163,17 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -174099,9 +184244,9 @@ type TargetHttpsProxiesSetSslCertificatesCall struct { // SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. // -// - project: Project ID for this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set an -// SslCertificates resource for. +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource to set an +// SslCertificates resource for. 
func (r *TargetHttpsProxiesService) SetSslCertificates(project string, targetHttpsProxy string, targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest) *TargetHttpsProxiesSetSslCertificatesCall { c := &TargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -174194,17 +184339,17 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -174280,10 +184425,10 @@ type TargetHttpsProxiesSetSslPolicyCall struct { // balancer. They do not affect the connection between the load balancer // and the backends. // -// - project: Project ID for this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose SSL -// policy is to be set. The name must be 1-63 characters long, and -// comply with RFC1035. +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose SSL +// policy is to be set. The name must be 1-63 characters long, and +// comply with RFC1035. func (r *TargetHttpsProxiesService) SetSslPolicy(project string, targetHttpsProxy string, sslpolicyreference *SslPolicyReference) *TargetHttpsProxiesSetSslPolicyCall { c := &TargetHttpsProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -174376,17 +184521,17 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -174457,9 +184602,9 @@ type TargetHttpsProxiesSetUrlMapCall struct { // SetUrlMap: Changes the URL map for TargetHttpsProxy. // -// - project: Project ID for this request. -// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose URL -// map is to be set. +// - project: Project ID for this request. +// - targetHttpsProxy: Name of the TargetHttpsProxy resource whose URL +// map is to be set. 
func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy string, urlmapreference *UrlMapReference) *TargetHttpsProxiesSetUrlMapCall { c := &TargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -174552,17 +184697,17 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -174810,17 +184955,17 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetInstanceAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -175022,17 +185167,17 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -175108,8 +185253,7 @@ type TargetInstancesGetCall struct { header_ http.Header } -// Get: Returns the specified TargetInstance resource. Gets a list of -// available target instances by making a list() request. +// Get: Returns the specified TargetInstance resource. // // - project: Project ID for this request. // - targetInstance: Name of the TargetInstance resource to return. @@ -175199,17 +185343,17 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetInstance{ ServerResponse: googleapi.ServerResponse{ @@ -175223,7 +185367,7 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan } return ret, nil // { - // "description": "Returns the specified TargetInstance resource. 
Gets a list of available target instances by making a list() request.", + // "description": "Returns the specified TargetInstance resource.", // "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}", // "httpMethod": "GET", // "id": "compute.targetInstances.get", @@ -175377,17 +185521,17 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -175627,17 +185771,17 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetInstanceList{ ServerResponse: googleapi.ServerResponse{ @@ -175849,17 +185993,17 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -176037,17 +186181,17 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -176303,17 +186447,17 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetPoolAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -176515,17 +186659,17 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -176601,8 +186745,7 @@ type TargetPoolsGetCall struct { header_ http.Header } -// 
Get: Returns the specified target pool. Gets a list of available -// target pools by making a list() request. +// Get: Returns the specified target pool. // // - project: Project ID for this request. // - region: Name of the region scoping this request. @@ -176692,17 +186835,17 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetPool{ ServerResponse: googleapi.ServerResponse{ @@ -176716,7 +186859,7 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro } return ret, nil // { - // "description": "Returns the specified target pool. Gets a list of available target pools by making a list() request.", + // "description": "Returns the specified target pool.", // "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}", // "httpMethod": "GET", // "id": "compute.targetPools.get", @@ -176777,10 +186920,10 @@ type TargetPoolsGetHealthCall struct { // GetHealth: Gets the most recent health check results for each IP for // the instance that is referenced by the given target pool. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetPool: Name of the TargetPool resource to which the queried -// instance belongs. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to which the queried +// instance belongs. 
func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall { c := &TargetPoolsGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -176859,17 +187002,17 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetPoolInstanceHealth{ ServerResponse: googleapi.ServerResponse{ @@ -177040,17 +187183,17 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -177290,17 +187433,17 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetPoolList{ ServerResponse: googleapi.ServerResponse{ @@ -177512,17 +187655,17 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -177603,10 +187746,10 @@ type TargetPoolsRemoveInstanceCall struct { // RemoveInstance: Removes instance URL from a target pool. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetPool: Name of the TargetPool resource to remove instances -// from. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to remove instances +// from. 
func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -177701,17 +187844,17 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -177792,10 +187935,10 @@ type TargetPoolsSetBackupCall struct { // SetBackup: Changes a backup target pool's configurations. // -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -// - targetPool: Name of the TargetPool resource to set a backup pool -// for. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to set a backup pool +// for. func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -177897,17 +188040,17 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -178080,17 +188223,17 @@ func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -178157,8 +188300,7 @@ type TargetSslProxiesGetCall struct { header_ http.Header } -// Get: Returns the specified TargetSslProxy resource. Gets a list of -// available target SSL proxies by making a list() request. +// Get: Returns the specified TargetSslProxy resource. // // - project: Project ID for this request. // - targetSslProxy: Name of the TargetSslProxy resource to return. 
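// Illustrative sketch (not part of the vendored file): the gensupport.WrapError
// calls introduced throughout this bump wrap transport errors, but the original
// *googleapi.Error stays in the error chain, so existing callers can still match
// on it with errors.As. svc, project, region, pool and ctx are hypothetical
// placeholders; assumed imports are "context", "errors",
// "google.golang.org/api/compute/v1", and "google.golang.org/api/googleapi".
func deleteTargetPool(ctx context.Context, svc *compute.Service, project, region, pool string) error {
	_, err := svc.TargetPools.Delete(project, region, pool).Context(ctx).Do()
	if err != nil {
		var gerr *googleapi.Error
		if errors.As(err, &gerr) && gerr.Code == 404 {
			// Treat an already-deleted pool as success.
			return nil
		}
		return err
	}
	return nil
}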
@@ -178245,17 +188387,17 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetSslProxy{ ServerResponse: googleapi.ServerResponse{ @@ -178269,7 +188411,7 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr } return ret, nil // { - // "description": "Returns the specified TargetSslProxy resource. Gets a list of available target SSL proxies by making a list() request.", + // "description": "Returns the specified TargetSslProxy resource.", // "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}", // "httpMethod": "GET", // "id": "compute.targetSslProxies.get", @@ -178411,17 +188553,17 @@ func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -178649,17 +188791,17 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetSslProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -178765,9 +188907,9 @@ type TargetSslProxiesSetBackendServiceCall struct { // SetBackendService: Changes the BackendService for TargetSslProxy. // -// - project: Project ID for this request. -// - targetSslProxy: Name of the TargetSslProxy resource whose -// BackendService resource is to be set. +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose +// BackendService resource is to be set. func (r *TargetSslProxiesService) SetBackendService(project string, targetSslProxy string, targetsslproxiessetbackendservicerequest *TargetSslProxiesSetBackendServiceRequest) *TargetSslProxiesSetBackendServiceCall { c := &TargetSslProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -178860,17 +189002,17 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -178942,10 +189084,10 @@ type TargetSslProxiesSetCertificateMapCall struct { // SetCertificateMap: Changes the Certificate Map for TargetSslProxy. 
// -// - project: Project ID for this request. -// - targetSslProxy: Name of the TargetSslProxy resource whose -// CertificateMap is to be set. The name must be 1-63 characters long, -// and comply with RFC1035. +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose +// CertificateMap is to be set. The name must be 1-63 characters long, +// and comply with RFC1035. func (r *TargetSslProxiesService) SetCertificateMap(project string, targetSslProxy string, targetsslproxiessetcertificatemaprequest *TargetSslProxiesSetCertificateMapRequest) *TargetSslProxiesSetCertificateMapCall { c := &TargetSslProxiesSetCertificateMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -179038,17 +189180,17 @@ func (c *TargetSslProxiesSetCertificateMapCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -179119,9 +189261,9 @@ type TargetSslProxiesSetProxyHeaderCall struct { // SetProxyHeader: Changes the ProxyHeaderType for TargetSslProxy. // -// - project: Project ID for this request. -// - targetSslProxy: Name of the TargetSslProxy resource whose -// ProxyHeader is to be set. +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose +// ProxyHeader is to be set. func (r *TargetSslProxiesService) SetProxyHeader(project string, targetSslProxy string, targetsslproxiessetproxyheaderrequest *TargetSslProxiesSetProxyHeaderRequest) *TargetSslProxiesSetProxyHeaderCall { c := &TargetSslProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -179214,17 +189356,17 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -179296,9 +189438,9 @@ type TargetSslProxiesSetSslCertificatesCall struct { // SetSslCertificates: Changes SslCertificates for TargetSslProxy. // -// - project: Project ID for this request. -// - targetSslProxy: Name of the TargetSslProxy resource whose -// SslCertificate resource is to be set. +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose +// SslCertificate resource is to be set. 
func (r *TargetSslProxiesService) SetSslCertificates(project string, targetSslProxy string, targetsslproxiessetsslcertificatesrequest *TargetSslProxiesSetSslCertificatesRequest) *TargetSslProxiesSetSslCertificatesCall { c := &TargetSslProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -179391,17 +189533,17 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -179476,10 +189618,10 @@ type TargetSslProxiesSetSslPolicyCall struct { // connections between clients and the SSL proxy load balancer. They do // not affect the connection between the load balancer and the backends. // -// - project: Project ID for this request. -// - targetSslProxy: Name of the TargetSslProxy resource whose SSL -// policy is to be set. The name must be 1-63 characters long, and -// comply with RFC1035. +// - project: Project ID for this request. +// - targetSslProxy: Name of the TargetSslProxy resource whose SSL +// policy is to be set. The name must be 1-63 characters long, and +// comply with RFC1035. func (r *TargetSslProxiesService) SetSslPolicy(project string, targetSslProxy string, sslpolicyreference *SslPolicyReference) *TargetSslProxiesSetSslPolicyCall { c := &TargetSslProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -179572,17 +189714,17 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -179639,6 +189781,304 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op } +// method id "compute.targetTcpProxies.aggregatedList": + +type TargetTcpProxiesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all TargetTcpProxy resources, +// regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. +func (r *TargetTcpProxiesService) AggregatedList(project string) *TargetTcpProxiesAggregatedListCall { + c := &TargetTcpProxiesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. 
The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *TargetTcpProxiesAggregatedListCall) Filter(filter string) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *TargetTcpProxiesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. 
(Default: `500`) +func (c *TargetTcpProxiesAggregatedListCall) MaxResults(maxResults int64) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *TargetTcpProxiesAggregatedListCall) OrderBy(orderBy string) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *TargetTcpProxiesAggregatedListCall) PageToken(pageToken string) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *TargetTcpProxiesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetTcpProxiesAggregatedListCall) Fields(s ...googleapi.Field) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetTcpProxiesAggregatedListCall) IfNoneMatch(entityTag string) *TargetTcpProxiesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetTcpProxiesAggregatedListCall) Context(ctx context.Context) *TargetTcpProxiesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *TargetTcpProxiesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetTcpProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/targetTcpProxies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetTcpProxies.aggregatedList" call. +// Exactly one of *TargetTcpProxyAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *TargetTcpProxyAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetTcpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetTcpProxyAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TargetTcpProxyAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all TargetTcpProxy resources, regional and global, available to the specified project.", + // "flatPath": "projects/{project}/aggregated/targetTcpProxies", + // "httpMethod": "GET", + // "id": "compute.targetTcpProxies.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/targetTcpProxies", + // "response": { + // "$ref": "TargetTcpProxyAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetTcpProxiesAggregatedListCall) Pages(ctx context.Context, f func(*TargetTcpProxyAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.targetTcpProxies.delete": type TargetTcpProxiesDeleteCall struct { @@ -179740,17 +190180,17 @@ func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -179817,8 +190257,7 @@ type TargetTcpProxiesGetCall struct { header_ http.Header } -// Get: Returns the specified TargetTcpProxy resource. Gets a list of -// available target TCP proxies by making a list() request. +// Get: Returns the specified TargetTcpProxy resource. // // - project: Project ID for this request. // - targetTcpProxy: Name of the TargetTcpProxy resource to return. @@ -179905,17 +190344,17 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetTcpProxy{ ServerResponse: googleapi.ServerResponse{ @@ -179929,7 +190368,7 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr } return ret, nil // { - // "description": "Returns the specified TargetTcpProxy resource. 
Gets a list of available target TCP proxies by making a list() request.", + // "description": "Returns the specified TargetTcpProxy resource.", // "flatPath": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}", // "httpMethod": "GET", // "id": "compute.targetTcpProxies.get", @@ -180071,17 +190510,17 @@ func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -180309,17 +190748,17 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetTcpProxyList{ ServerResponse: googleapi.ServerResponse{ @@ -180425,9 +190864,9 @@ type TargetTcpProxiesSetBackendServiceCall struct { // SetBackendService: Changes the BackendService for TargetTcpProxy. // -// - project: Project ID for this request. -// - targetTcpProxy: Name of the TargetTcpProxy resource whose -// BackendService resource is to be set. +// - project: Project ID for this request. +// - targetTcpProxy: Name of the TargetTcpProxy resource whose +// BackendService resource is to be set. func (r *TargetTcpProxiesService) SetBackendService(project string, targetTcpProxy string, targettcpproxiessetbackendservicerequest *TargetTcpProxiesSetBackendServiceRequest) *TargetTcpProxiesSetBackendServiceCall { c := &TargetTcpProxiesSetBackendServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -180520,17 +190959,17 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -180602,9 +191041,9 @@ type TargetTcpProxiesSetProxyHeaderCall struct { // SetProxyHeader: Changes the ProxyHeaderType for TargetTcpProxy. // -// - project: Project ID for this request. -// - targetTcpProxy: Name of the TargetTcpProxy resource whose -// ProxyHeader is to be set. +// - project: Project ID for this request. +// - targetTcpProxy: Name of the TargetTcpProxy resource whose +// ProxyHeader is to be set. 
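// Editor's note (illustrative sketch, not part of the vendored diff): the newly added
// compute.targetTcpProxies.aggregatedList call shown above follows the standard
// aggregated-list pattern, with Pages handling nextPageToken iteration. A minimal
// sketch, assuming the generated TargetTcpProxyAggregatedList type follows the usual
// shape of an Items map keyed by scope (e.g. "global", "regions/<region>") whose
// scoped lists carry a TargetTcpProxies slice:

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listAllTargetTcpProxies(ctx context.Context, svc *compute.Service, project string) error {
	// ReturnPartialSuccess lets the call return results from reachable scopes even if
	// some scopes fail.
	call := svc.TargetTcpProxies.AggregatedList(project).ReturnPartialSuccess(true)
	return call.Pages(ctx, func(page *compute.TargetTcpProxyAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, proxy := range scoped.TargetTcpProxies {
				fmt.Printf("%s: %s\n", scope, proxy.Name)
			}
		}
		return nil // returning an error here would stop the iteration
	})
}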
func (r *TargetTcpProxiesService) SetProxyHeader(project string, targetTcpProxy string, targettcpproxiessetproxyheaderrequest *TargetTcpProxiesSetProxyHeaderRequest) *TargetTcpProxiesSetProxyHeaderCall { c := &TargetTcpProxiesSetProxyHeaderCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -180697,17 +191136,17 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -180955,17 +191394,17 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetVpnGatewayAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -181167,17 +191606,17 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -181253,8 +191692,7 @@ type TargetVpnGatewaysGetCall struct { header_ http.Header } -// Get: Returns the specified target VPN gateway. Gets a list of -// available target VPN gateways by making a list() request. +// Get: Returns the specified target VPN gateway. // // - project: Project ID for this request. // - region: Name of the region for this request. @@ -181344,17 +191782,17 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetVpnGateway{ ServerResponse: googleapi.ServerResponse{ @@ -181368,7 +191806,7 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG } return ret, nil // { - // "description": "Returns the specified target VPN gateway. 
Gets a list of available target VPN gateways by making a list() request.", + // "description": "Returns the specified target VPN gateway.", // "flatPath": "projects/{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", // "httpMethod": "GET", // "id": "compute.targetVpnGateways.get", @@ -181522,17 +191960,17 @@ func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operati if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -181772,17 +192210,17 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TargetVpnGatewayList{ ServerResponse: googleapi.ServerResponse{ @@ -181882,6 +192320,195 @@ func (c *TargetVpnGatewaysListCall) Pages(ctx context.Context, f func(*TargetVpn } } +// method id "compute.targetVpnGateways.setLabels": + +type TargetVpnGatewaysSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a TargetVpnGateway. To learn more about +// labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. +func (r *TargetVpnGatewaysService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *TargetVpnGatewaysSetLabelsCall { + c := &TargetVpnGatewaysSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *TargetVpnGatewaysSetLabelsCall) RequestId(requestId string) *TargetVpnGatewaysSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *TargetVpnGatewaysSetLabelsCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetVpnGatewaysSetLabelsCall) Context(ctx context.Context) *TargetVpnGatewaysSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetVpnGatewaysSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetVpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetVpnGateways.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetVpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a TargetVpnGateway. 
To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.targetVpnGateways.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetVpnGateways/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.urlMaps.aggregatedList": type UrlMapsAggregatedListCall struct { @@ -182073,17 +192700,17 @@ func (c *UrlMapsAggregatedListCall) Do(opts ...googleapi.CallOption) (*UrlMapsAg if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapsAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -182281,17 +192908,17 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -182358,8 +192985,7 @@ type UrlMapsGetCall struct { header_ http.Header } -// Get: Returns the specified UrlMap resource. Gets a list of available -// URL maps by making a list() request. 
+// Get: Returns the specified UrlMap resource. // // - project: Project ID for this request. // - urlMap: Name of the UrlMap resource to return. @@ -182446,17 +193072,17 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMap{ ServerResponse: googleapi.ServerResponse{ @@ -182470,7 +193096,7 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { } return ret, nil // { - // "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", + // "description": "Returns the specified UrlMap resource.", // "flatPath": "projects/{project}/global/urlMaps/{urlMap}", // "httpMethod": "GET", // "id": "compute.urlMaps.get", @@ -182612,17 +193238,17 @@ func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -182783,17 +193409,17 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -183029,17 +193655,17 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapList{ ServerResponse: googleapi.ServerResponse{ @@ -183241,17 +193867,17 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -183418,17 +194044,17 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if 
err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -183580,17 +194206,17 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UrlMapsValidateResponse{ ServerResponse: googleapi.ServerResponse{ @@ -183833,17 +194459,17 @@ func (c *VpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnGa if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnGatewayAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -184045,17 +194671,17 @@ func (c *VpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -184131,8 +194757,7 @@ type VpnGatewaysGetCall struct { header_ http.Header } -// Get: Returns the specified VPN gateway. Gets a list of available VPN -// gateways by making a list() request. +// Get: Returns the specified VPN gateway. // // - project: Project ID for this request. // - region: Name of the region for this request. @@ -184222,17 +194847,17 @@ func (c *VpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*VpnGateway, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnGateway{ ServerResponse: googleapi.ServerResponse{ @@ -184246,7 +194871,7 @@ func (c *VpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*VpnGateway, erro } return ret, nil // { - // "description": "Returns the specified VPN gateway. 
Gets a list of available VPN gateways by making a list() request.", + // "description": "Returns the specified VPN gateway.", // "flatPath": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}", // "httpMethod": "GET", // "id": "compute.vpnGateways.get", @@ -184394,17 +195019,17 @@ func (c *VpnGatewaysGetStatusCall) Do(opts ...googleapi.CallOption) (*VpnGateway if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnGatewaysGetStatusResponse{ ServerResponse: googleapi.ServerResponse{ @@ -184572,17 +195197,17 @@ func (c *VpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -184822,17 +195447,17 @@ func (c *VpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayList, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnGatewayList{ ServerResponse: googleapi.ServerResponse{ @@ -185045,17 +195670,17 @@ func (c *VpnGatewaysSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -185218,17 +195843,17 @@ func (c *VpnGatewaysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -185480,17 +196105,17 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnTunnelAggregatedList{ ServerResponse: googleapi.ServerResponse{ @@ -185692,17 +196317,17 @@ func (c *VpnTunnelsDeleteCall) Do(opts 
...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -185778,8 +196403,7 @@ type VpnTunnelsGetCall struct { header_ http.Header } -// Get: Returns the specified VpnTunnel resource. Gets a list of -// available VPN tunnels by making a list() request. +// Get: Returns the specified VpnTunnel resource. // // - project: Project ID for this request. // - region: Name of the region for this request. @@ -185869,17 +196493,17 @@ func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnTunnel{ ServerResponse: googleapi.ServerResponse{ @@ -185893,7 +196517,7 @@ func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) } return ret, nil // { - // "description": "Returns the specified VpnTunnel resource. Gets a list of available VPN tunnels by making a list() request.", + // "description": "Returns the specified VpnTunnel resource.", // "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", // "httpMethod": "GET", // "id": "compute.vpnTunnels.get", @@ -186047,17 +196671,17 @@ func (c *VpnTunnelsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -186297,17 +196921,17 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &VpnTunnelList{ ServerResponse: googleapi.ServerResponse{ @@ -186407,6 +197031,195 @@ func (c *VpnTunnelsListCall) Pages(ctx context.Context, f func(*VpnTunnelList) e } } +// method id "compute.vpnTunnels.setLabels": + +type VpnTunnelsSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on a VpnTunnel. To learn more about +// labels, read the Labeling Resources documentation. +// +// - project: Project ID for this request. +// - region: The region for this request. +// - resource: Name or id of the resource for this request. 
+func (r *VpnTunnelsService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *VpnTunnelsSetLabelsCall { + c := &VpnTunnelsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *VpnTunnelsSetLabelsCall) RequestId(requestId string) *VpnTunnelsSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnTunnelsSetLabelsCall) Fields(s ...googleapi.Field) *VpnTunnelsSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnTunnelsSetLabelsCall) Context(ctx context.Context) *VpnTunnelsSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *VpnTunnelsSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *VpnTunnelsSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.vpnTunnels.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnTunnelsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a VpnTunnel. To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.vpnTunnels.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/vpnTunnels/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.zoneOperations.delete": type ZoneOperationsDeleteCall struct { @@ -186491,7 +197304,7 @@ func (c *ZoneOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -186639,17 +197452,17 @@ func (c *ZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -186890,17 +197703,17 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ @@ -187099,17 +197912,17 @@ func (c *ZoneOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -187180,8 +197993,7 @@ type ZonesGetCall struct { header_ http.Header } -// Get: Returns the specified Zone resource. Gets a list of available -// zones by making a list() request. +// Get: Returns the specified Zone resource. // // - project: Project ID for this request. // - zone: Name of the zone resource to return. 
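For orientation, here is a minimal, hypothetical usage sketch of the generated compute.vpnTunnels.setLabels call shown in the hunk above. The project, region, tunnel name, labels, fingerprint, and request ID are placeholders; only the builder methods (SetLabels, RequestId, Context, Do) and the RegionSetLabelsRequest type come from the vendored google.golang.org/api/compute/v1 package itself.

// vpntunnel_setlabels_example.go — hypothetical sketch, not part of the vendored code.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Uses Application Default Credentials, as in the generated package docs.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder label set; LabelFingerprint must match the resource's current fingerprint.
	req := &compute.RegionSetLabelsRequest{
		LabelFingerprint: "placeholder-fingerprint",
		Labels:           map[string]string{"env": "dev"},
	}

	op, err := svc.VpnTunnels.
		SetLabels("my-project", "us-east1", "my-vpn-tunnel", req).
		RequestId("11111111-2222-3333-4444-555555555555"). // optional idempotency token; must be a non-zero UUID
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, "status:", op.Status)
}

As the regenerated Do methods above show, non-2xx responses are now passed through gensupport.WrapError before being returned, which is why this sketch handles the error generically rather than type-asserting on it directly.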
@@ -187268,17 +198080,17 @@ func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Zone{ ServerResponse: googleapi.ServerResponse{ @@ -187292,7 +198104,7 @@ func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { } return ret, nil // { - // "description": "Returns the specified Zone resource. Gets a list of available zones by making a list() request.", + // "description": "Returns the specified Zone resource.", // "flatPath": "projects/{project}/zones/{zone}", // "httpMethod": "GET", // "id": "compute.zones.get", @@ -187507,17 +198319,17 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ZoneList{ ServerResponse: googleapi.ServerResponse{ diff --git a/vendor/google.golang.org/api/dns/v1/dns-api.json b/vendor/google.golang.org/api/dns/v1/dns-api.json index f0e24c15c..0a86c9ffb 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-api.json +++ b/vendor/google.golang.org/api/dns/v1/dns-api.json @@ -732,7 +732,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", + "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this returns an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", "flatPath": "dns/v1/projects/{projectsId}/managedZones/{managedZonesId}:testIamPermissions", "httpMethod": "POST", "id": "dns.managedZones.testIamPermissions", @@ -1493,7 +1493,7 @@ "type": "string" }, "responsePolicy": { - "description": "User assigned name of the Respones Policy addressed by this request.", + "description": "User assigned name of the response policy addressed by this request.", "location": "path", "required": true, "type": "string" @@ -1824,7 +1824,7 @@ } } }, - "revision": "20220630", + "revision": "20230126", "rootUrl": "https://dns.googleapis.com/", "schemas": { "Change": { @@ -2156,7 +2156,7 @@ "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. 
To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", + "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. 
For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding.", "items": { "type": "string" }, @@ -2450,6 +2450,10 @@ "description": "IPv4 address of a target name server.", "type": "string" }, + "ipv6Address": { + "description": "IPv6 address of a target name server. Does not accept both fields (ipv4 \u0026 ipv6) being populated. Public preview as of November 2022.", + "type": "string" + }, "kind": { "default": "dns#managedZoneForwardingConfigNameServerTarget", "type": "string" @@ -2517,6 +2521,13 @@ "ManagedZonePrivateVisibilityConfig": { "id": "ManagedZonePrivateVisibilityConfig", "properties": { + "gkeClusters": { + "description": "The list of Google Kubernetes Engine clusters that can see this zone.", + "items": { + "$ref": "ManagedZonePrivateVisibilityConfigGKECluster" + }, + "type": "array" + }, "kind": { "default": "dns#managedZonePrivateVisibilityConfig", "type": "string" @@ -2531,6 +2542,20 @@ }, "type": "object" }, + "ManagedZonePrivateVisibilityConfigGKECluster": { + "id": "ManagedZonePrivateVisibilityConfigGKECluster", + "properties": { + "gkeClusterName": { + "description": "The resource name of the cluster to bind this ManagedZone to. This should be specified in the format like: projects/*/locations/*/clusters/*. This is referenced from GKE projects.locations.clusters.get API: https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/get", + "type": "string" + }, + "kind": { + "default": "dns#managedZonePrivateVisibilityConfigGKECluster", + "type": "string" + } + }, + "type": "object" + }, "ManagedZonePrivateVisibilityConfigNetwork": { "id": "ManagedZonePrivateVisibilityConfigNetwork", "properties": { @@ -2813,7 +2838,11 @@ "type": "string" }, "ipv4Address": { - "description": "IPv4 address to forward to.", + "description": "IPv4 address to forward queries to.", + "type": "string" + }, + "ipv6Address": { + "description": "IPv6 address to forward to. Does not accept both fields (ipv4 \u0026 ipv6) being populated. Public preview as of November 2022.", "type": "string" }, "kind": { @@ -3000,6 +3029,9 @@ "default": "dns#rRSetRoutingPolicy", "type": "string" }, + "primaryBackup": { + "$ref": "RRSetRoutingPolicyPrimaryBackupPolicy" + }, "wrr": { "$ref": "RRSetRoutingPolicyWrrPolicy" } @@ -3010,6 +3042,10 @@ "description": "Configures a RRSetRoutingPolicy that routes based on the geo location of the querying user.", "id": "RRSetRoutingPolicyGeoPolicy", "properties": { + "enableFencing": { + "description": "Without fencing, if health check fails for all configured items in the current geo bucket, we'll failover to the next nearest geo bucket. 
With fencing, if health check is enabled, as long as some targets in the current geo bucket are healthy, we'll return only the healthy targets. However, if they're all unhealthy, we won't failover to the next nearest bucket, we'll simply return all the items in the current bucket even though they're unhealthy.", + "type": "boolean" + }, "items": { "description": "The primary geo routing configuration. If there are multiple items with the same location, an error is returned instead.", "items": { @@ -3028,6 +3064,10 @@ "description": "ResourceRecordSet data for one geo location.", "id": "RRSetRoutingPolicyGeoPolicyGeoPolicyItem", "properties": { + "healthCheckedTargets": { + "$ref": "RRSetRoutingPolicyHealthCheckTargets", + "description": "For A and AAAA types only. Endpoints to return in the query result only if they are healthy. These can be specified along with rrdata within this item." + }, "kind": { "default": "dns#rRSetRoutingPolicyGeoPolicyGeoPolicyItem", "type": "string" @@ -3052,6 +3092,96 @@ }, "type": "object" }, + "RRSetRoutingPolicyHealthCheckTargets": { + "description": "HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response.", + "id": "RRSetRoutingPolicyHealthCheckTargets", + "properties": { + "internalLoadBalancers": { + "items": { + "$ref": "RRSetRoutingPolicyLoadBalancerTarget" + }, + "type": "array" + } + }, + "type": "object" + }, + "RRSetRoutingPolicyLoadBalancerTarget": { + "id": "RRSetRoutingPolicyLoadBalancerTarget", + "properties": { + "ipAddress": { + "description": "The frontend IP address of the", + "type": "string" + }, + "ipProtocol": { + "enum": [ + "undefined", + "tcp", + "udp" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "kind": { + "default": "dns#rRSetRoutingPolicyLoadBalancerTarget", + "type": "string" + }, + "loadBalancerType": { + "enum": [ + "none", + "regionalL4ilb" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "networkUrl": { + "description": "The fully qualified url of the network on which the ILB is", + "type": "string" + }, + "port": { + "description": "Load Balancer to health check. The configured port of the Load Balancer.", + "type": "string" + }, + "project": { + "description": "present. This should be formatted like https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} The project ID in which the ILB exists.", + "type": "string" + }, + "region": { + "description": "The region for regional ILBs.", + "type": "string" + } + }, + "type": "object" + }, + "RRSetRoutingPolicyPrimaryBackupPolicy": { + "description": "Configures a RRSetRoutingPolicy such that all queries are responded with the primary_targets if they are healthy. And if all of them are unhealthy, then we fallback to a geo localized policy.", + "id": "RRSetRoutingPolicyPrimaryBackupPolicy", + "properties": { + "backupGeoTargets": { + "$ref": "RRSetRoutingPolicyGeoPolicy", + "description": "Backup targets provide a regional failover policy for the otherwise global primary targets. If serving state is set to BACKUP, this policy essentially becomes a geo routing policy." 
+ }, + "kind": { + "default": "dns#rRSetRoutingPolicyPrimaryBackupPolicy", + "type": "string" + }, + "primaryTargets": { + "$ref": "RRSetRoutingPolicyHealthCheckTargets" + }, + "trickleTraffic": { + "description": "When serving state is PRIMARY, this field provides the option of sending a small percentage of the traffic to the backup targets.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, "RRSetRoutingPolicyWrrPolicy": { "description": "Configures a RRSetRoutingPolicy that routes in a weighted round robin fashion.", "id": "RRSetRoutingPolicyWrrPolicy", @@ -3073,6 +3203,10 @@ "description": "A routing block which contains the routing information for one WRR item.", "id": "RRSetRoutingPolicyWrrPolicyWrrPolicyItem", "properties": { + "healthCheckedTargets": { + "$ref": "RRSetRoutingPolicyHealthCheckTargets", + "description": "endpoints that need to be health checked before making the routing decision. The unhealthy endpoints will be omitted from the result. If all endpoints within a buckete are unhealthy, we'll choose a different bucket (sampled w.r.t. its weight) for responding. Note that if DNSSEC is enabled for this zone, only one of rrdata or health_checked_targets can be set." + }, "kind": { "default": "dns#rRSetRoutingPolicyWrrPolicyWrrPolicyItem", "type": "string" @@ -3188,7 +3322,7 @@ "$ref": "ResponseHeader" }, "nextPageToken": { - "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token. This lets you the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned are an inconsistent view of the collection. You cannot retrieve a consistent snapshot of a collection larger than the maximum page size.", + "description": "The presence of this field indicates that more results exist following your last page of results in pagination order. To fetch them, make another list request by using this value as your page token. This lets you view the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned are an inconsistent view of the collection. 
You cannot retrieve a consistent snapshot of a collection larger than the maximum page size.", "type": "string" }, "responsePolicies": { @@ -3233,6 +3367,13 @@ "description": "User-provided description for this Response Policy.", "type": "string" }, + "gkeClusters": { + "description": "The list of Google Kubernetes Engine clusters to which this response policy is applied.", + "items": { + "$ref": "ResponsePolicyGKECluster" + }, + "type": "array" + }, "id": { "description": "Unique identifier for the resource; defined by the server (output only).", "format": "int64", @@ -3242,6 +3383,13 @@ "default": "dns#responsePolicy", "type": "string" }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "User labels.", + "type": "object" + }, "networks": { "description": "List of network names specifying networks to which this policy is applied.", "items": { @@ -3256,6 +3404,20 @@ }, "type": "object" }, + "ResponsePolicyGKECluster": { + "id": "ResponsePolicyGKECluster", + "properties": { + "gkeClusterName": { + "description": "The resource name of the cluster to bind this response policy to. This should be specified in the format like: projects/*/locations/*/clusters/*. This is referenced from GKE projects.locations.clusters.get API: https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/get", + "type": "string" + }, + "kind": { + "default": "dns#responsePolicyGKECluster", + "type": "string" + } + }, + "type": "object" + }, "ResponsePolicyNetwork": { "id": "ResponsePolicyNetwork", "properties": { diff --git a/vendor/google.golang.org/api/dns/v1/dns-gen.go b/vendor/google.golang.org/api/dns/v1/dns-gen.go index 600b4f8ef..19a209a8c 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-gen.go +++ b/vendor/google.golang.org/api/dns/v1/dns-gen.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC. +// Copyright 2023 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,35 +8,35 @@ // // For product documentation, see: https://cloud.google.com/dns/docs // -// Creating a client +// # Creating a client // // Usage example: // -// import "google.golang.org/api/dns/v1" -// ... -// ctx := context.Background() -// dnsService, err := dns.NewService(ctx) +// import "google.golang.org/api/dns/v1" +// ... +// ctx := context.Background() +// dnsService, err := dns.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // -// Other authentication options +// # Other authentication options // // By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: // -// dnsService, err := dns.NewService(ctx, option.WithScopes(dns.NdevClouddnsReadwriteScope)) +// dnsService, err := dns.NewService(ctx, option.WithScopes(dns.NdevClouddnsReadwriteScope)) // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // -// dnsService, err := dns.NewService(ctx, option.WithAPIKey("AIza...")) +// dnsService, err := dns.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) 
-// dnsService, err := dns.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// dnsService, err := dns.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package dns // import "google.golang.org/api/dns/v1" @@ -780,19 +780,26 @@ type GoogleIamV1Binding struct { // `allUsers`: A special identifier that represents anyone who is on the // internet; with or without a Google account. * // `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. * - // `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . * - // `serviceAccount:{emailid}`: An email address that represents a - // service account. For example, - // `my-other-app@appspot.gserviceaccount.com`. * `group:{emailid}`: An - // email address that represents a Google group. For example, - // `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An - // email address (plus unique identifier) representing a user that has - // been recently deleted. For example, - // `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered - // user retains the role in the binding. * + // who is authenticated with a Google account or a service account. Does + // not include identities that come from external identity providers + // (IdPs) through identity federation. * `user:{emailid}`: An email + // address that represents a specific Google account. For example, + // `alice@example.com` . * `serviceAccount:{emailid}`: An email address + // that represents a Google service account. For example, + // `my-other-app@appspot.gserviceaccount.com`. * + // `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: + // An identifier for a Kubernetes service account + // (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). + // For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. + // * `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. * `domain:{domain}`: The G Suite + // domain (primary) that represents all the users of that domain. For + // example, `google.com` or `example.com`. * + // `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a user that has been recently + // deleted. For example, `alice@example.com?uid=123456789012345678901`. + // If the user is recovered, this value reverts to `user:{emailid}` and + // the recovered user retains the role in the binding. * // `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address // (plus unique identifier) representing a service account that has been // recently deleted. For example, @@ -804,9 +811,7 @@ type GoogleIamV1Binding struct { // that has been recently deleted. For example, // `admins@example.com?uid=123456789012345678901`. If the group is // recovered, this value reverts to `group:{emailid}` and the recovered - // group retains the role in the binding. * `domain:{domain}`: The G - // Suite domain (primary) that represents all the users of that domain. - // For example, `google.com` or `example.com`. + // group retains the role in the binding. 
Members []string `json:"members,omitempty"` // Role: Role that is assigned to the list of `members`, or principals. @@ -1362,6 +1367,11 @@ type ManagedZoneForwardingConfigNameServerTarget struct { // Ipv4Address: IPv4 address of a target name server. Ipv4Address string `json:"ipv4Address,omitempty"` + // Ipv6Address: IPv6 address of a target name server. Does not accept + // both fields (ipv4 & ipv6) being populated. Public preview as of + // November 2022. + Ipv6Address string `json:"ipv6Address,omitempty"` + Kind string `json:"kind,omitempty"` // ForceSendFields is a list of field names (e.g. "ForwardingPath") to @@ -1504,12 +1514,16 @@ func (s *ManagedZonePeeringConfigTargetNetwork) MarshalJSON() ([]byte, error) { } type ManagedZonePrivateVisibilityConfig struct { + // GkeClusters: The list of Google Kubernetes Engine clusters that can + // see this zone. + GkeClusters []*ManagedZonePrivateVisibilityConfigGKECluster `json:"gkeClusters,omitempty"` + Kind string `json:"kind,omitempty"` // Networks: The list of VPC networks that can see this zone. Networks []*ManagedZonePrivateVisibilityConfigNetwork `json:"networks,omitempty"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // ForceSendFields is a list of field names (e.g. "GkeClusters") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -1517,10 +1531,10 @@ type ManagedZonePrivateVisibilityConfig struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "GkeClusters") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -1532,6 +1546,40 @@ func (s *ManagedZonePrivateVisibilityConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ManagedZonePrivateVisibilityConfigGKECluster struct { + // GkeClusterName: The resource name of the cluster to bind this + // ManagedZone to. This should be specified in the format like: + // projects/*/locations/*/clusters/*. This is referenced from GKE + // projects.locations.clusters.get API: + // https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/get + GkeClusterName string `json:"gkeClusterName,omitempty"` + + Kind string `json:"kind,omitempty"` + + // ForceSendFields is a list of field names (e.g. "GkeClusterName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "GkeClusterName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ManagedZonePrivateVisibilityConfigGKECluster) MarshalJSON() ([]byte, error) { + type NoMethod ManagedZonePrivateVisibilityConfigGKECluster + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ManagedZonePrivateVisibilityConfigNetwork struct { Kind string `json:"kind,omitempty"` @@ -2062,9 +2110,13 @@ type PolicyAlternativeNameServerConfigTargetNameServer struct { // VPC. ForwardingPath string `json:"forwardingPath,omitempty"` - // Ipv4Address: IPv4 address to forward to. + // Ipv4Address: IPv4 address to forward queries to. Ipv4Address string `json:"ipv4Address,omitempty"` + // Ipv6Address: IPv6 address to forward to. Does not accept both fields + // (ipv4 & ipv6) being populated. Public preview as of November 2022. + Ipv6Address string `json:"ipv6Address,omitempty"` + Kind string `json:"kind,omitempty"` // ForceSendFields is a list of field names (e.g. "ForwardingPath") to @@ -2292,6 +2344,8 @@ type RRSetRoutingPolicy struct { Kind string `json:"kind,omitempty"` + PrimaryBackup *RRSetRoutingPolicyPrimaryBackupPolicy `json:"primaryBackup,omitempty"` + Wrr *RRSetRoutingPolicyWrrPolicy `json:"wrr,omitempty"` // ForceSendFields is a list of field names (e.g. "Geo") to @@ -2320,13 +2374,22 @@ func (s *RRSetRoutingPolicy) MarshalJSON() ([]byte, error) { // RRSetRoutingPolicyGeoPolicy: Configures a RRSetRoutingPolicy that // routes based on the geo location of the querying user. type RRSetRoutingPolicyGeoPolicy struct { + // EnableFencing: Without fencing, if health check fails for all + // configured items in the current geo bucket, we'll failover to the + // next nearest geo bucket. With fencing, if health check is enabled, as + // long as some targets in the current geo bucket are healthy, we'll + // return only the healthy targets. However, if they're all unhealthy, + // we won't failover to the next nearest bucket, we'll simply return all + // the items in the current bucket even though they're unhealthy. + EnableFencing bool `json:"enableFencing,omitempty"` + // Items: The primary geo routing configuration. If there are multiple // items with the same location, an error is returned instead. Items []*RRSetRoutingPolicyGeoPolicyGeoPolicyItem `json:"items,omitempty"` Kind string `json:"kind,omitempty"` - // ForceSendFields is a list of field names (e.g. "Items") to + // ForceSendFields is a list of field names (e.g. "EnableFencing") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -2334,10 +2397,10 @@ type RRSetRoutingPolicyGeoPolicy struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "EnableFencing") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -2352,6 +2415,11 @@ func (s *RRSetRoutingPolicyGeoPolicy) MarshalJSON() ([]byte, error) { // RRSetRoutingPolicyGeoPolicyGeoPolicyItem: ResourceRecordSet data for // one geo location. type RRSetRoutingPolicyGeoPolicyGeoPolicyItem struct { + // HealthCheckedTargets: For A and AAAA types only. Endpoints to return + // in the query result only if they are healthy. These can be specified + // along with rrdata within this item. + HealthCheckedTargets *RRSetRoutingPolicyHealthCheckTargets `json:"healthCheckedTargets,omitempty"` + Kind string `json:"kind,omitempty"` // Location: The geo-location granularity is a GCP region. This location @@ -2366,7 +2434,96 @@ type RRSetRoutingPolicyGeoPolicyGeoPolicyItem struct { // for DNSSEC enabled zones, there's a restriction of 1 ip per item. . SignatureRrdatas []string `json:"signatureRrdatas,omitempty"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // ForceSendFields is a list of field names (e.g. + // "HealthCheckedTargets") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthCheckedTargets") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RRSetRoutingPolicyGeoPolicyGeoPolicyItem) MarshalJSON() ([]byte, error) { + type NoMethod RRSetRoutingPolicyGeoPolicyGeoPolicyItem + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RRSetRoutingPolicyHealthCheckTargets: HealthCheckTargets describes +// endpoints to health-check when responding to Routing Policy queries. +// Only the healthy endpoints will be included in the response. +type RRSetRoutingPolicyHealthCheckTargets struct { + InternalLoadBalancers []*RRSetRoutingPolicyLoadBalancerTarget `json:"internalLoadBalancers,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "InternalLoadBalancers") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InternalLoadBalancers") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RRSetRoutingPolicyHealthCheckTargets) MarshalJSON() ([]byte, error) { + type NoMethod RRSetRoutingPolicyHealthCheckTargets + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RRSetRoutingPolicyLoadBalancerTarget struct { + // IpAddress: The frontend IP address of the + IpAddress string `json:"ipAddress,omitempty"` + + // Possible values: + // "undefined" + // "tcp" + // "udp" + IpProtocol string `json:"ipProtocol,omitempty"` + + Kind string `json:"kind,omitempty"` + + // Possible values: + // "none" + // "regionalL4ilb" + LoadBalancerType string `json:"loadBalancerType,omitempty"` + + // NetworkUrl: The fully qualified url of the network on which the ILB + // is + NetworkUrl string `json:"networkUrl,omitempty"` + + // Port: Load Balancer to health check. The configured port of the Load + // Balancer. + Port string `json:"port,omitempty"` + + // Project: present. This should be formatted like + // https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} + // The project ID in which the ILB exists. + Project string `json:"project,omitempty"` + + // Region: The region for regional ILBs. + Region string `json:"region,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpAddress") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -2374,8 +2531,8 @@ type RRSetRoutingPolicyGeoPolicyGeoPolicyItem struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "IpAddress") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -2383,12 +2540,69 @@ type RRSetRoutingPolicyGeoPolicyGeoPolicyItem struct { NullFields []string `json:"-"` } -func (s *RRSetRoutingPolicyGeoPolicyGeoPolicyItem) MarshalJSON() ([]byte, error) { - type NoMethod RRSetRoutingPolicyGeoPolicyGeoPolicyItem +func (s *RRSetRoutingPolicyLoadBalancerTarget) MarshalJSON() ([]byte, error) { + type NoMethod RRSetRoutingPolicyLoadBalancerTarget raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RRSetRoutingPolicyPrimaryBackupPolicy: Configures a +// RRSetRoutingPolicy such that all queries are responded with the +// primary_targets if they are healthy. And if all of them are +// unhealthy, then we fallback to a geo localized policy. 
+type RRSetRoutingPolicyPrimaryBackupPolicy struct { + // BackupGeoTargets: Backup targets provide a regional failover policy + // for the otherwise global primary targets. If serving state is set to + // BACKUP, this policy essentially becomes a geo routing policy. + BackupGeoTargets *RRSetRoutingPolicyGeoPolicy `json:"backupGeoTargets,omitempty"` + + Kind string `json:"kind,omitempty"` + + PrimaryTargets *RRSetRoutingPolicyHealthCheckTargets `json:"primaryTargets,omitempty"` + + // TrickleTraffic: When serving state is PRIMARY, this field provides + // the option of sending a small percentage of the traffic to the backup + // targets. + TrickleTraffic float64 `json:"trickleTraffic,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackupGeoTargets") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackupGeoTargets") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RRSetRoutingPolicyPrimaryBackupPolicy) MarshalJSON() ([]byte, error) { + type NoMethod RRSetRoutingPolicyPrimaryBackupPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *RRSetRoutingPolicyPrimaryBackupPolicy) UnmarshalJSON(data []byte) error { + type NoMethod RRSetRoutingPolicyPrimaryBackupPolicy + var s1 struct { + TrickleTraffic gensupport.JSONFloat64 `json:"trickleTraffic"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.TrickleTraffic = float64(s1.TrickleTraffic) + return nil +} + // RRSetRoutingPolicyWrrPolicy: Configures a RRSetRoutingPolicy that // routes in a weighted round robin fashion. type RRSetRoutingPolicyWrrPolicy struct { @@ -2422,6 +2636,14 @@ func (s *RRSetRoutingPolicyWrrPolicy) MarshalJSON() ([]byte, error) { // RRSetRoutingPolicyWrrPolicyWrrPolicyItem: A routing block which // contains the routing information for one WRR item. type RRSetRoutingPolicyWrrPolicyWrrPolicyItem struct { + // HealthCheckedTargets: endpoints that need to be health checked before + // making the routing decision. The unhealthy endpoints will be omitted + // from the result. If all endpoints within a buckete are unhealthy, + // we'll choose a different bucket (sampled w.r.t. its weight) for + // responding. Note that if DNSSEC is enabled for this zone, only one of + // rrdata or health_checked_targets can be set. + HealthCheckedTargets *RRSetRoutingPolicyHealthCheckTargets `json:"healthCheckedTargets,omitempty"` + Kind string `json:"kind,omitempty"` Rrdatas []string `json:"rrdatas,omitempty"` @@ -2438,20 +2660,22 @@ type RRSetRoutingPolicyWrrPolicyWrrPolicyItem struct { // should be non-negative. Weight float64 `json:"weight,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"Kind") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "HealthCheckedTargets") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "HealthCheckedTargets") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -2617,11 +2841,11 @@ func (s *ResponseHeader) MarshalJSON() ([]byte, error) { type ResponsePoliciesListResponse struct { Header *ResponseHeader `json:"header,omitempty"` - // NextPageToken: The presence of this field indicates that there exist - // more results following your last page of results in pagination order. - // To fetch them, make another list request using this value as your - // page token. This lets you the complete contents of even very large - // collections one page at a time. However, if the contents of the + // NextPageToken: The presence of this field indicates that more results + // exist following your last page of results in pagination order. To + // fetch them, make another list request by using this value as your + // page token. This lets you view the complete contents of even very + // large collections one page at a time. However, if the contents of the // collection change between the first and last paginated list request, // the set of all elements returned are an inconsistent view of the // collection. You cannot retrieve a consistent snapshot of a collection @@ -2729,12 +2953,19 @@ type ResponsePolicy struct { // Description: User-provided description for this Response Policy. Description string `json:"description,omitempty"` + // GkeClusters: The list of Google Kubernetes Engine clusters to which + // this response policy is applied. + GkeClusters []*ResponsePolicyGKECluster `json:"gkeClusters,omitempty"` + // Id: Unique identifier for the resource; defined by the server (output // only). Id int64 `json:"id,omitempty,string"` Kind string `json:"kind,omitempty"` + // Labels: User labels. + Labels map[string]string `json:"labels,omitempty"` + // Networks: List of network names specifying networks to which this // policy is applied. 
Networks []*ResponsePolicyNetwork `json:"networks,omitempty"` @@ -2769,6 +3000,40 @@ func (s *ResponsePolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ResponsePolicyGKECluster struct { + // GkeClusterName: The resource name of the cluster to bind this + // response policy to. This should be specified in the format like: + // projects/*/locations/*/clusters/*. This is referenced from GKE + // projects.locations.clusters.get API: + // https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/get + GkeClusterName string `json:"gkeClusterName,omitempty"` + + Kind string `json:"kind,omitempty"` + + // ForceSendFields is a list of field names (e.g. "GkeClusterName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "GkeClusterName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ResponsePolicyGKECluster) MarshalJSON() ([]byte, error) { + type NoMethod ResponsePolicyGKECluster + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ResponsePolicyNetwork struct { Kind string `json:"kind,omitempty"` @@ -3019,9 +3284,9 @@ type ChangesCreateCall struct { // Create: Atomically updates the ResourceRecordSet collection. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - project: Identifies the project addressed by this request. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. func (r *ChangesService) Create(project string, managedZone string, change *Change) *ChangesCreateCall { c := &ChangesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -3107,17 +3372,17 @@ func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Change{ ServerResponse: googleapi.ServerResponse{ @@ -3188,11 +3453,11 @@ type ChangesGetCall struct { // Get: Fetches the representation of an existing Change. // -// - changeId: The identifier of the requested change, from a previous -// ResourceRecordSetsChangeResponse. -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - project: Identifies the project addressed by this request. 
+// - changeId: The identifier of the requested change, from a previous +// ResourceRecordSetsChangeResponse. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. func (r *ChangesService) Get(project string, managedZone string, changeId string) *ChangesGetCall { c := &ChangesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -3287,17 +3552,17 @@ func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Change{ ServerResponse: googleapi.ServerResponse{ @@ -3373,9 +3638,9 @@ type ChangesListCall struct { // List: Enumerates Changes to a ResourceRecordSet collection. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - project: Identifies the project addressed by this request. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. func (r *ChangesService) List(project string, managedZone string) *ChangesListCall { c := &ChangesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -3403,7 +3668,8 @@ func (c *ChangesListCall) PageToken(pageToken string) *ChangesListCall { // only supported value is change sequence. // // Possible values: -// "changeSequence" (default) +// +// "changeSequence" (default) func (c *ChangesListCall) SortBy(sortBy string) *ChangesListCall { c.urlParams_.Set("sortBy", sortBy) return c @@ -3492,17 +3758,17 @@ func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ChangesListResponse{ ServerResponse: googleapi.ServerResponse{ @@ -3616,10 +3882,10 @@ type DnsKeysGetCall struct { // Get: Fetches the representation of an existing DnsKey. // -// - dnsKeyId: The identifier of the requested DnsKey. -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - project: Identifies the project addressed by this request. +// - dnsKeyId: The identifier of the requested DnsKey. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. 
func (r *DnsKeysService) Get(project string, managedZone string, dnsKeyId string) *DnsKeysGetCall { c := &DnsKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -3723,17 +3989,17 @@ func (c *DnsKeysGetCall) Do(opts ...googleapi.CallOption) (*DnsKey, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DnsKey{ ServerResponse: googleapi.ServerResponse{ @@ -3814,9 +4080,9 @@ type DnsKeysListCall struct { // List: Enumerates DnsKeys to a ResourceRecordSet collection. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - project: Identifies the project addressed by this request. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. func (r *DnsKeysService) List(project string, managedZone string) *DnsKeysListCall { c := &DnsKeysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -3925,17 +4191,17 @@ func (c *DnsKeysListCall) Do(opts ...googleapi.CallOption) (*DnsKeysListResponse if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &DnsKeysListResponse{ ServerResponse: googleapi.ServerResponse{ @@ -4037,10 +4303,10 @@ type ManagedZoneOperationsGetCall struct { // Get: Fetches the representation of an existing Operation. // -// - managedZone: Identifies the managed zone addressed by this request. -// - operation: Identifies the operation addressed by this request (ID -// of the operation). -// - project: Identifies the project addressed by this request. +// - managedZone: Identifies the managed zone addressed by this request. +// - operation: Identifies the operation addressed by this request (ID +// of the operation). +// - project: Identifies the project addressed by this request. func (r *ManagedZoneOperationsService) Get(project string, managedZone string, operation string) *ManagedZoneOperationsGetCall { c := &ManagedZoneOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -4135,17 +4401,17 @@ func (c *ManagedZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operat if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -4250,8 +4516,9 @@ func (c *ManagedZoneOperationsListCall) PageToken(pageToken string) *ManagedZone // only supported values are START_TIME and ID. 
// // Possible values: -// "startTime" (default) -// "id" +// +// "startTime" (default) +// "id" func (c *ManagedZoneOperationsListCall) SortBy(sortBy string) *ManagedZoneOperationsListCall { c.urlParams_.Set("sortBy", sortBy) return c @@ -4334,17 +4601,17 @@ func (c *ManagedZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*Manag if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ManagedZoneOperationsListResponse{ ServerResponse: googleapi.ServerResponse{ @@ -4537,17 +4804,17 @@ func (c *ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (*ManagedZone, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ManagedZone{ ServerResponse: googleapi.ServerResponse{ @@ -4609,9 +4876,9 @@ type ManagedZonesDeleteCall struct { // Delete: Deletes a previously created ManagedZone. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - project: Identifies the project addressed by this request. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. func (r *ManagedZonesService) Delete(project string, managedZone string) *ManagedZonesDeleteCall { c := &ManagedZonesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -4686,7 +4953,7 @@ func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -4740,9 +5007,9 @@ type ManagedZonesGetCall struct { // Get: Fetches the representation of an existing ManagedZone. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - project: Identifies the project addressed by this request. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. 
func (r *ManagedZonesService) Get(project string, managedZone string) *ManagedZonesGetCall { c := &ManagedZonesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -4835,17 +5102,17 @@ func (c *ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (*ManagedZone, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ManagedZone{ ServerResponse: googleapi.ServerResponse{ @@ -4915,10 +5182,10 @@ type ManagedZonesGetIamPolicyCall struct { // an empty policy if the resource exists and does not have a policy // set. // -// - resource: REQUIRED: The resource for which the policy is being -// requested. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy is being +// requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. func (r *ManagedZonesService) GetIamPolicy(resource string, googleiamv1getiampolicyrequest *GoogleIamV1GetIamPolicyRequest) *ManagedZonesGetIamPolicyCall { c := &ManagedZonesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -4993,17 +5260,17 @@ func (c *ManagedZonesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Google if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &GoogleIamV1Policy{ ServerResponse: googleapi.ServerResponse{ @@ -5169,17 +5436,17 @@ func (c *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (*ManagedZonesLi if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ManagedZonesListResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5273,9 +5540,9 @@ type ManagedZonesPatchCall struct { // Patch: Applies a partial update to an existing ManagedZone. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - project: Identifies the project addressed by this request. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. 
func (r *ManagedZonesService) Patch(project string, managedZone string, managedzone *ManagedZone) *ManagedZonesPatchCall { c := &ManagedZonesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -5361,17 +5628,17 @@ func (c *ManagedZonesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -5442,10 +5709,10 @@ type ManagedZonesSetIamPolicyCall struct { // resource. Replaces any existing policy. Can return `NOT_FOUND`, // `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. // -// - resource: REQUIRED: The resource for which the policy is being -// specified. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy is being +// specified. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. func (r *ManagedZonesService) SetIamPolicy(resource string, googleiamv1setiampolicyrequest *GoogleIamV1SetIamPolicyRequest) *ManagedZonesSetIamPolicyCall { c := &ManagedZonesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5520,17 +5787,17 @@ func (c *ManagedZonesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Google if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &GoogleIamV1Policy{ ServerResponse: googleapi.ServerResponse{ @@ -5587,16 +5854,16 @@ type ManagedZonesTestIamPermissionsCall struct { } // TestIamPermissions: Returns permissions that a caller has on the -// specified resource. If the resource does not exist, this will return -// an empty set of permissions, not a `NOT_FOUND` error. Note: This +// specified resource. If the resource does not exist, this returns an +// empty set of permissions, not a `NOT_FOUND` error. Note: This // operation is designed to be used for building permission-aware UIs // and command-line tools, not for authorization checking. This // operation may "fail open" without warning. // -// - resource: REQUIRED: The resource for which the policy detail is -// being requested. See Resource names -// (https://cloud.google.com/apis/design/resource_names) for the -// appropriate value for this field. +// - resource: REQUIRED: The resource for which the policy detail is +// being requested. See Resource names +// (https://cloud.google.com/apis/design/resource_names) for the +// appropriate value for this field. 
func (r *ManagedZonesService) TestIamPermissions(resource string, googleiamv1testiampermissionsrequest *GoogleIamV1TestIamPermissionsRequest) *ManagedZonesTestIamPermissionsCall { c := &ManagedZonesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -5672,17 +5939,17 @@ func (c *ManagedZonesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &GoogleIamV1TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5696,7 +5963,7 @@ func (c *ManagedZonesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", + // "description": "Returns permissions that a caller has on the specified resource. If the resource does not exist, this returns an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.", // "flatPath": "dns/v1/projects/{projectsId}/managedZones/{managedZonesId}:testIamPermissions", // "httpMethod": "POST", // "id": "dns.managedZones.testIamPermissions", @@ -5743,9 +6010,9 @@ type ManagedZonesUpdateCall struct { // Update: Updates an existing ManagedZone. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - project: Identifies the project addressed by this request. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. 
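The TestIamPermissions call edited above deliberately "fails open", so it suits permission-aware UIs and tooling rather than authorization checks. A short, hedged continuation of the first sketch (reusing its svc and ctx); the resource path follows the flatPath shown in the hunk, the Permissions request/response fields follow the standard IAM shape, and the permission names are illustrative:

req := &dns.GoogleIamV1TestIamPermissionsRequest{
	Permissions: []string{"dns.managedZones.get", "dns.changes.create"},
}
resp, err := svc.ManagedZones.TestIamPermissions(
	"projects/my-project/managedZones/my-zone", req,
).Context(ctx).Do()
if err != nil {
	log.Fatal(err)
}
// Only the permissions actually granted to the caller come back.
fmt.Println("granted:", resp.Permissions)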
func (r *ManagedZonesService) Update(project string, managedZone string, managedzone *ManagedZone) *ManagedZonesUpdateCall { c := &ManagedZonesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -5831,17 +6098,17 @@ func (c *ManagedZonesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -5994,17 +6261,17 @@ func (c *PoliciesCreateCall) Do(opts ...googleapi.CallOption) (*Policy, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -6067,9 +6334,9 @@ type PoliciesDeleteCall struct { // Delete: Deletes a previously created Policy. Fails if the policy is // still being referenced by a network. // -// - policy: User given friendly name of the policy addressed by this -// request. -// - project: Identifies the project addressed by this request. +// - policy: User given friendly name of the policy addressed by this +// request. +// - project: Identifies the project addressed by this request. func (r *PoliciesService) Delete(project string, policy string) *PoliciesDeleteCall { c := &PoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -6144,7 +6411,7 @@ func (c *PoliciesDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -6198,9 +6465,9 @@ type PoliciesGetCall struct { // Get: Fetches the representation of an existing Policy. // -// - policy: User given friendly name of the policy addressed by this -// request. -// - project: Identifies the project addressed by this request. +// - policy: User given friendly name of the policy addressed by this +// request. +// - project: Identifies the project addressed by this request. 
func (r *PoliciesService) Get(project string, policy string) *PoliciesGetCall { c := &PoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -6293,17 +6560,17 @@ func (c *PoliciesGetCall) Do(opts ...googleapi.CallOption) (*Policy, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ @@ -6469,17 +6736,17 @@ func (c *PoliciesListCall) Do(opts ...googleapi.CallOption) (*PoliciesListRespon if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PoliciesListResponse{ ServerResponse: googleapi.ServerResponse{ @@ -6568,9 +6835,9 @@ type PoliciesPatchCall struct { // Patch: Applies a partial update to an existing Policy. // -// - policy: User given friendly name of the policy addressed by this -// request. -// - project: Identifies the project addressed by this request. +// - policy: User given friendly name of the policy addressed by this +// request. +// - project: Identifies the project addressed by this request. func (r *PoliciesService) Patch(project string, policy string, policy2 *Policy) *PoliciesPatchCall { c := &PoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -6656,17 +6923,17 @@ func (c *PoliciesPatchCall) Do(opts ...googleapi.CallOption) (*PoliciesPatchResp if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PoliciesPatchResponse{ ServerResponse: googleapi.ServerResponse{ @@ -6736,9 +7003,9 @@ type PoliciesUpdateCall struct { // Update: Updates an existing Policy. // -// - policy: User given friendly name of the policy addressed by this -// request. -// - project: Identifies the project addressed by this request. +// - policy: User given friendly name of the policy addressed by this +// request. +// - project: Identifies the project addressed by this request. 
func (r *PoliciesService) Update(project string, policy string, policy2 *Policy) *PoliciesUpdateCall { c := &PoliciesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -6824,17 +7091,17 @@ func (c *PoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*PoliciesUpdateRe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &PoliciesUpdateResponse{ ServerResponse: googleapi.ServerResponse{ @@ -6994,17 +7261,17 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Project{ ServerResponse: googleapi.ServerResponse{ @@ -7066,9 +7333,9 @@ type ResourceRecordSetsCreateCall struct { // Create: Creates a new ResourceRecordSet. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - project: Identifies the project addressed by this request. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. func (r *ResourceRecordSetsService) Create(project string, managedZone string, resourcerecordset *ResourceRecordSet) *ResourceRecordSetsCreateCall { c := &ResourceRecordSetsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -7154,17 +7421,17 @@ func (c *ResourceRecordSetsCreateCall) Do(opts ...googleapi.CallOption) (*Resour if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResourceRecordSet{ ServerResponse: googleapi.ServerResponse{ @@ -7235,11 +7502,11 @@ type ResourceRecordSetsDeleteCall struct { // Delete: Deletes a previously created ResourceRecordSet. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - name: Fully qualified domain name. -// - project: Identifies the project addressed by this request. -// - type: RRSet type. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - name: Fully qualified domain name. +// - project: Identifies the project addressed by this request. +// - type: RRSet type. 
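For reference, the ResourceRecordSets service touched above lets callers manage records directly instead of submitting a Change. A minimal sketch of Create, again reusing svc and ctx from the first sketch; the Name/Type/Ttl/Rrdatas fields are assumed from the usual ResourceRecordSet schema and all values are placeholders:

rrset := &dns.ResourceRecordSet{
	Name:    "www.example.com.", // fully qualified, with trailing dot
	Type:    "A",
	Ttl:     300,
	Rrdatas: []string{"203.0.113.10"},
}
created, err := svc.ResourceRecordSets.Create("my-project", "my-zone", rrset).Context(ctx).Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println("created:", created.Name, created.Type)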
func (r *ResourceRecordSetsService) Delete(project string, managedZone string, name string, type_ string) *ResourceRecordSetsDeleteCall { c := &ResourceRecordSetsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -7323,17 +7590,17 @@ func (c *ResourceRecordSetsDeleteCall) Do(opts ...googleapi.CallOption) (*Resour if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResourceRecordSetsDeleteResponse{ ServerResponse: googleapi.ServerResponse{ @@ -7416,11 +7683,11 @@ type ResourceRecordSetsGetCall struct { // Get: Fetches the representation of an existing ResourceRecordSet. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - name: Fully qualified domain name. -// - project: Identifies the project addressed by this request. -// - type: RRSet type. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - name: Fully qualified domain name. +// - project: Identifies the project addressed by this request. +// - type: RRSet type. func (r *ResourceRecordSetsService) Get(project string, managedZone string, name string, type_ string) *ResourceRecordSetsGetCall { c := &ResourceRecordSetsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -7517,17 +7784,17 @@ func (c *ResourceRecordSetsGetCall) Do(opts ...googleapi.CallOption) (*ResourceR if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResourceRecordSet{ ServerResponse: googleapi.ServerResponse{ @@ -7611,9 +7878,9 @@ type ResourceRecordSetsListCall struct { // List: Enumerates ResourceRecordSets that you have created but not yet // deleted. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - project: Identifies the project addressed by this request. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - project: Identifies the project addressed by this request. 
func (r *ResourceRecordSetsService) List(project string, managedZone string) *ResourceRecordSetsListCall { c := &ResourceRecordSetsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -7728,17 +7995,17 @@ func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*Resource if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResourceRecordSetsListResponse{ ServerResponse: googleapi.ServerResponse{ @@ -7846,11 +8113,11 @@ type ResourceRecordSetsPatchCall struct { // Patch: Applies a partial update to an existing ResourceRecordSet. // -// - managedZone: Identifies the managed zone addressed by this request. -// Can be the managed zone name or ID. -// - name: Fully qualified domain name. -// - project: Identifies the project addressed by this request. -// - type: RRSet type. +// - managedZone: Identifies the managed zone addressed by this request. +// Can be the managed zone name or ID. +// - name: Fully qualified domain name. +// - project: Identifies the project addressed by this request. +// - type: RRSet type. func (r *ResourceRecordSetsService) Patch(project string, managedZone string, name string, type_ string, resourcerecordset *ResourceRecordSet) *ResourceRecordSetsPatchCall { c := &ResourceRecordSetsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -7940,17 +8207,17 @@ func (c *ResourceRecordSetsPatchCall) Do(opts ...googleapi.CallOption) (*Resourc if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResourceRecordSet{ ServerResponse: googleapi.ServerResponse{ @@ -8117,17 +8384,17 @@ func (c *ResponsePoliciesCreateCall) Do(opts ...googleapi.CallOption) (*Response if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResponsePolicy{ ServerResponse: googleapi.ServerResponse{ @@ -8190,9 +8457,9 @@ type ResponsePoliciesDeleteCall struct { // Delete: Deletes a previously created Response Policy. Fails if the // response policy is non-empty or still being referenced by a network. // -// - project: Identifies the project addressed by this request. -// - responsePolicy: User assigned name of the Response Policy addressed -// by this request. +// - project: Identifies the project addressed by this request. +// - responsePolicy: User assigned name of the Response Policy addressed +// by this request. 
func (r *ResponsePoliciesService) Delete(project string, responsePolicy string) *ResponsePoliciesDeleteCall { c := &ResponsePoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -8267,7 +8534,7 @@ func (c *ResponsePoliciesDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -8321,9 +8588,9 @@ type ResponsePoliciesGetCall struct { // Get: Fetches the representation of an existing Response Policy. // -// - project: Identifies the project addressed by this request. -// - responsePolicy: User assigned name of the Response Policy addressed -// by this request. +// - project: Identifies the project addressed by this request. +// - responsePolicy: User assigned name of the Response Policy addressed +// by this request. func (r *ResponsePoliciesService) Get(project string, responsePolicy string) *ResponsePoliciesGetCall { c := &ResponsePoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -8416,17 +8683,17 @@ func (c *ResponsePoliciesGetCall) Do(opts ...googleapi.CallOption) (*ResponsePol if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResponsePolicy{ ServerResponse: googleapi.ServerResponse{ @@ -8592,17 +8859,17 @@ func (c *ResponsePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResponsePo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResponsePoliciesListResponse{ ServerResponse: googleapi.ServerResponse{ @@ -8691,9 +8958,9 @@ type ResponsePoliciesPatchCall struct { // Patch: Applies a partial update to an existing Response Policy. // -// - project: Identifies the project addressed by this request. -// - responsePolicy: User assigned name of the Respones Policy addressed -// by this request. +// - project: Identifies the project addressed by this request. +// - responsePolicy: User assigned name of the response policy addressed +// by this request. 
func (r *ResponsePoliciesService) Patch(project string, responsePolicy string, responsepolicy *ResponsePolicy) *ResponsePoliciesPatchCall { c := &ResponsePoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -8779,17 +9046,17 @@ func (c *ResponsePoliciesPatchCall) Do(opts ...googleapi.CallOption) (*ResponseP if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResponsePoliciesPatchResponse{ ServerResponse: googleapi.ServerResponse{ @@ -8824,7 +9091,7 @@ func (c *ResponsePoliciesPatchCall) Do(opts ...googleapi.CallOption) (*ResponseP // "type": "string" // }, // "responsePolicy": { - // "description": "User assigned name of the Respones Policy addressed by this request.", + // "description": "User assigned name of the response policy addressed by this request.", // "location": "path", // "required": true, // "type": "string" @@ -8859,9 +9126,9 @@ type ResponsePoliciesUpdateCall struct { // Update: Updates an existing Response Policy. // -// - project: Identifies the project addressed by this request. -// - responsePolicy: User assigned name of the Response Policy addressed -// by this request. +// - project: Identifies the project addressed by this request. +// - responsePolicy: User assigned name of the Response Policy addressed +// by this request. func (r *ResponsePoliciesService) Update(project string, responsePolicy string, responsepolicy *ResponsePolicy) *ResponsePoliciesUpdateCall { c := &ResponsePoliciesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -8947,17 +9214,17 @@ func (c *ResponsePoliciesUpdateCall) Do(opts ...googleapi.CallOption) (*Response if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResponsePoliciesUpdateResponse{ ServerResponse: googleapi.ServerResponse{ @@ -9027,9 +9294,9 @@ type ResponsePolicyRulesCreateCall struct { // Create: Creates a new Response Policy Rule. // -// - project: Identifies the project addressed by this request. -// - responsePolicy: User assigned name of the Response Policy -// containing the Response Policy Rule. +// - project: Identifies the project addressed by this request. +// - responsePolicy: User assigned name of the Response Policy +// containing the Response Policy Rule. 
func (r *ResponsePolicyRulesService) Create(project string, responsePolicy string, responsepolicyrule *ResponsePolicyRule) *ResponsePolicyRulesCreateCall { c := &ResponsePolicyRulesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -9115,17 +9382,17 @@ func (c *ResponsePolicyRulesCreateCall) Do(opts ...googleapi.CallOption) (*Respo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResponsePolicyRule{ ServerResponse: googleapi.ServerResponse{ @@ -9195,11 +9462,11 @@ type ResponsePolicyRulesDeleteCall struct { // Delete: Deletes a previously created Response Policy Rule. // -// - project: Identifies the project addressed by this request. -// - responsePolicy: User assigned name of the Response Policy -// containing the Response Policy Rule. -// - responsePolicyRule: User assigned name of the Response Policy Rule -// addressed by this request. +// - project: Identifies the project addressed by this request. +// - responsePolicy: User assigned name of the Response Policy +// containing the Response Policy Rule. +// - responsePolicyRule: User assigned name of the Response Policy Rule +// addressed by this request. func (r *ResponsePolicyRulesService) Delete(project string, responsePolicy string, responsePolicyRule string) *ResponsePolicyRulesDeleteCall { c := &ResponsePolicyRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -9276,7 +9543,7 @@ func (c *ResponsePolicyRulesDeleteCall) Do(opts ...googleapi.CallOption) error { } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return gensupport.WrapError(err) } return nil // { @@ -9338,11 +9605,11 @@ type ResponsePolicyRulesGetCall struct { // Get: Fetches the representation of an existing Response Policy Rule. // -// - project: Identifies the project addressed by this request. -// - responsePolicy: User assigned name of the Response Policy -// containing the Response Policy Rule. -// - responsePolicyRule: User assigned name of the Response Policy Rule -// addressed by this request. +// - project: Identifies the project addressed by this request. +// - responsePolicy: User assigned name of the Response Policy +// containing the Response Policy Rule. +// - responsePolicyRule: User assigned name of the Response Policy Rule +// addressed by this request. 
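The delete-style calls in these hunks return a bare error from Do (no response body), which is why they only wrap the CheckResponse error. A hedged sketch of tearing down a rule and then its now-empty response policy, reusing svc and ctx from the first sketch; the policy and rule names are placeholders and the ResponsePolicyRules/ResponsePolicies fields are assumed from the generated Service struct:

if err := svc.ResponsePolicyRules.Delete("my-project", "my-policy", "my-rule").Context(ctx).Do(); err != nil {
	log.Fatal(err)
}
// A response policy can only be deleted once it is empty and no longer
// referenced by a network.
if err := svc.ResponsePolicies.Delete("my-project", "my-policy").Context(ctx).Do(); err != nil {
	log.Fatal(err)
}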
func (r *ResponsePolicyRulesService) Get(project string, responsePolicy string, responsePolicyRule string) *ResponsePolicyRulesGetCall { c := &ResponsePolicyRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -9437,17 +9704,17 @@ func (c *ResponsePolicyRulesGetCall) Do(opts ...googleapi.CallOption) (*Response if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResponsePolicyRule{ ServerResponse: googleapi.ServerResponse{ @@ -9624,17 +9891,17 @@ func (c *ResponsePolicyRulesListCall) Do(opts ...googleapi.CallOption) (*Respons if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResponsePolicyRulesListResponse{ ServerResponse: googleapi.ServerResponse{ @@ -9731,11 +9998,11 @@ type ResponsePolicyRulesPatchCall struct { // Patch: Applies a partial update to an existing Response Policy Rule. // -// - project: Identifies the project addressed by this request. -// - responsePolicy: User assigned name of the Response Policy -// containing the Response Policy Rule. -// - responsePolicyRule: User assigned name of the Response Policy Rule -// addressed by this request. +// - project: Identifies the project addressed by this request. +// - responsePolicy: User assigned name of the Response Policy +// containing the Response Policy Rule. +// - responsePolicyRule: User assigned name of the Response Policy Rule +// addressed by this request. func (r *ResponsePolicyRulesService) Patch(project string, responsePolicy string, responsePolicyRule string, responsepolicyrule *ResponsePolicyRule) *ResponsePolicyRulesPatchCall { c := &ResponsePolicyRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -9823,17 +10090,17 @@ func (c *ResponsePolicyRulesPatchCall) Do(opts ...googleapi.CallOption) (*Respon if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResponsePolicyRulesPatchResponse{ ServerResponse: googleapi.ServerResponse{ @@ -9911,11 +10178,11 @@ type ResponsePolicyRulesUpdateCall struct { // Update: Updates an existing Response Policy Rule. // -// - project: Identifies the project addressed by this request. -// - responsePolicy: User assigned name of the Response Policy -// containing the Response Policy Rule. -// - responsePolicyRule: User assigned name of the Response Policy Rule -// addressed by this request. +// - project: Identifies the project addressed by this request. +// - responsePolicy: User assigned name of the Response Policy +// containing the Response Policy Rule. +// - responsePolicyRule: User assigned name of the Response Policy Rule +// addressed by this request. 
func (r *ResponsePolicyRulesService) Update(project string, responsePolicy string, responsePolicyRule string, responsepolicyrule *ResponsePolicyRule) *ResponsePolicyRulesUpdateCall { c := &ResponsePolicyRulesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -10004,17 +10271,17 @@ func (c *ResponsePolicyRulesUpdateCall) Do(opts ...googleapi.CallOption) (*Respo if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ResponsePolicyRulesUpdateResponse{ ServerResponse: googleapi.ServerResponse{ diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 2d3e00edc..b328a7976 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -79,6 +79,9 @@ type Error struct { Header http.Header Errors []ErrorItem + // err is typically a wrapped apierror.APIError, see + // google-api-go-client/internal/gensupport/error.go. + err error } // ErrorItem is a detailed error code & message from the Google API frontend. @@ -122,6 +125,15 @@ func (e *Error) Error() string { return buf.String() } +// Wrap allows an existing Error to wrap another error. See also [Error.Unwrap]. +func (e *Error) Wrap(err error) { + e.err = err +} + +func (e *Error) Unwrap() error { + return e.err +} + type errorReply struct { Error *Error `json:"error"` } @@ -174,8 +186,9 @@ func CheckMediaResponse(res *http.Response) error { } slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) return &Error{ - Code: res.StatusCode, - Body: string(slurp), + Code: res.StatusCode, + Body: string(slurp), + Header: res.Header, } } @@ -382,11 +395,11 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool { // For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, // you could request just those fields like this: // -// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// svc.Events.List().Fields("nextPageToken", "items/id").Do() // // or if you were also interested in each Item's "Updated" field, you can combine them like this: // -// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() // // Another way to find field names is through the Google API explorer: // https://developers.google.com/apis-explorer/#p/ diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go index 61720ec2e..f5d826c2a 100644 --- a/vendor/google.golang.org/api/googleapi/transport/apikey.go +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -7,7 +7,7 @@ // // This package is DEPRECATED. 
Users should instead use, // -// service, err := NewService(..., option.WithAPIKey(...)) +// service, err := NewService(..., option.WithAPIKey(...)) package transport import ( diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/internal/cert/default_cert.go similarity index 100% rename from vendor/google.golang.org/api/transport/cert/default_cert.go rename to vendor/google.golang.org/api/internal/cert/default_cert.go diff --git a/vendor/google.golang.org/api/transport/cert/enterprise_cert.go b/vendor/google.golang.org/api/internal/cert/enterprise_cert.go similarity index 93% rename from vendor/google.golang.org/api/transport/cert/enterprise_cert.go rename to vendor/google.golang.org/api/internal/cert/enterprise_cert.go index eaa52e07c..1061b5f05 100644 --- a/vendor/google.golang.org/api/transport/cert/enterprise_cert.go +++ b/vendor/google.golang.org/api/internal/cert/enterprise_cert.go @@ -15,7 +15,6 @@ package cert import ( "crypto/tls" "errors" - "os" "github.com/googleapis/enterprise-certificate-proxy/client" ) @@ -36,8 +35,7 @@ type ecpSource struct { func NewEnterpriseCertificateProxySource(configFilePath string) (Source, error) { key, err := client.Cred(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - // Config file missing means Enterprise Certificate Proxy is not supported. + if errors.Is(err, client.ErrCredUnavailable) { return nil, errSourceUnavailable } return nil, err diff --git a/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go b/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go similarity index 100% rename from vendor/google.golang.org/api/transport/cert/secureconnect_cert.go rename to vendor/google.golang.org/api/internal/cert/secureconnect_cert.go diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index b067a179b..63c660922 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -6,10 +6,15 @@ package internal import ( "context" + "crypto/tls" "encoding/json" "errors" "fmt" "io/ioutil" + "net" + "net/http" + "os" + "time" "golang.org/x/oauth2" "google.golang.org/api/internal/impersonate" @@ -17,6 +22,8 @@ import ( "golang.org/x/oauth2/google" ) +const quotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" + // Creds returns credential information obtained from DialSettings, or if none, then // it returns default credential information. func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { @@ -70,17 +77,35 @@ const ( // // - A self-signed JWT flow will be executed if the following conditions are // met: -// (1) At least one of the following is true: -// (a) No scope is provided -// (b) Scope for self-signed JWT flow is enabled -// (c) Audiences are explicitly provided by users -// (2) No service account impersontation +// +// (1) At least one of the following is true: +// (a) No scope is provided +// (b) Scope for self-signed JWT flow is enabled +// (c) Audiences are explicitly provided by users +// (2) No service account impersontation // // - Otherwise, executes standard OAuth 2.0 flow // More details: google.aip.dev/auth/4111 func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*google.Credentials, error) { + var params google.CredentialsParams + params.Scopes = ds.GetScopes() + + // Determine configurations for the OAuth2 transport, which is separate from the API transport. 
+ // The OAuth2 transport and endpoint will be configured for mTLS if applicable. + clientCertSource, oauth2Endpoint, err := GetClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) + if err != nil { + return nil, err + } + params.TokenURL = oauth2Endpoint + if clientCertSource != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: clientCertSource, + } + ctx = context.WithValue(ctx, oauth2.HTTPClient, customHTTPClient(tlsConfig)) + } + // By default, a standard OAuth 2.0 token source is created - cred, err := google.CredentialsFromJSON(ctx, data, ds.GetScopes()...) + cred, err := google.CredentialsFromJSONWithParams(ctx, data, params) if err != nil { return nil, err } @@ -130,14 +155,22 @@ func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource } } -// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials. -// -// NOTE(cbro): consider promoting this to a field on google.Credentials. -func QuotaProjectFromCreds(cred *google.Credentials) string { +// GetQuotaProject retrieves quota project with precedence being: client option, +// environment variable, creds file. +func GetQuotaProject(creds *google.Credentials, clientOpt string) string { + if clientOpt != "" { + return clientOpt + } + if env := os.Getenv(quotaProjectEnvVar); env != "" { + return env + } + if creds == nil { + return "" + } var v struct { QuotaProject string `json:"quota_project_id"` } - if err := json.Unmarshal(cred.JSON, &v); err != nil { + if err := json.Unmarshal(creds.JSON, &v); err != nil { return "" } return v.QuotaProject @@ -156,3 +189,35 @@ func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds * ProjectID: creds.ProjectID, }, nil } + +// oauth2DialSettings returns the settings to be used by the OAuth2 transport, which is separate from the API transport. +func oauth2DialSettings(ds *DialSettings) *DialSettings { + var ods DialSettings + ods.DefaultEndpoint = google.Endpoint.TokenURL + ods.DefaultMTLSEndpoint = google.MTLSTokenURL + ods.ClientCertSource = ds.ClientCertSource + return &ods +} + +// customHTTPClient constructs an HTTPClient using the provided tlsConfig, to support mTLS. +func customHTTPClient(tlsConfig *tls.Config) *http.Client { + trans := baseTransport() + trans.TLSClientConfig = tlsConfig + return &http.Client{Transport: trans} +} + +func baseTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/google.golang.org/api/transport/internal/dca/dca.go b/vendor/google.golang.org/api/internal/dca.go similarity index 79% rename from vendor/google.golang.org/api/transport/internal/dca/dca.go rename to vendor/google.golang.org/api/internal/dca.go index 071586e94..204a3fd2f 100644 --- a/vendor/google.golang.org/api/transport/internal/dca/dca.go +++ b/vendor/google.golang.org/api/internal/dca.go @@ -6,32 +6,33 @@ // Authentication according to https://google.aip.dev/auth/4114 // // The overall logic for DCA is as follows: -// 1. If both endpoint override and client certificate are specified, use them as is. -// 2. If user does not specify client certificate, we will attempt to use default -// client certificate. -// 3. 
If user does not specify endpoint override, we will use defaultMtlsEndpoint if -// client certificate is available and defaultEndpoint otherwise. +// 1. If both endpoint override and client certificate are specified, use them as is. +// 2. If user does not specify client certificate, we will attempt to use default +// client certificate. +// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if +// client certificate is available and defaultEndpoint otherwise. // // Implications of the above logic: -// 1. If the user specifies a non-mTLS endpoint override but client certificate is -// available, we will pass along the cert anyway and let the server decide what to do. -// 2. If the user specifies an mTLS endpoint override but client certificate is not -// available, we will not fail-fast, but let backend throw error when connecting. +// 1. If the user specifies a non-mTLS endpoint override but client certificate is +// available, we will pass along the cert anyway and let the server decide what to do. +// 2. If the user specifies an mTLS endpoint override but client certificate is not +// available, we will not fail-fast, but let backend throw error when connecting. // // We would like to avoid introducing client-side logic that parses whether the // endpoint override is an mTLS url, since the url pattern may change at anytime. // // This package is not intended for use by end developers. Use the // google.golang.org/api/option package to configure API clients. -package dca + +// Package internal supports the options and transport packages. +package internal import ( "net/url" "os" "strings" - "google.golang.org/api/internal" - "google.golang.org/api/transport/cert" + "google.golang.org/api/internal/cert" ) const ( @@ -43,7 +44,7 @@ const ( // GetClientCertificateSourceAndEndpoint is a convenience function that invokes // getClientCertificateSource and getEndpoint sequentially and returns the client // cert source and endpoint as a tuple. -func GetClientCertificateSourceAndEndpoint(settings *internal.DialSettings) (cert.Source, string, error) { +func GetClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { clientCertSource, err := getClientCertificateSource(settings) if err != nil { return nil, "", err @@ -65,7 +66,7 @@ func GetClientCertificateSourceAndEndpoint(settings *internal.DialSettings) (cer // Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE // must be set to "true" to allow certificate to be used (including user provided // certificates). For details, see AIP-4114. -func getClientCertificateSource(settings *internal.DialSettings) (cert.Source, error) { +func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { if !isClientCertificateEnabled() { return nil, nil } else if settings.ClientCertSource != nil { @@ -94,7 +95,7 @@ func isClientCertificateEnabled() bool { // URL (ex. https://...), then the user-provided address will be merged into // the default endpoint. 
For example, WithEndpoint("myhost:8000") and // WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" -func getEndpoint(settings *internal.DialSettings, clientCertSource cert.Source) (string, error) { +func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { if settings.Endpoint == "" { mtlsMode := getMTLSMode() if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { diff --git a/vendor/google.golang.org/api/internal/gensupport/error.go b/vendor/google.golang.org/api/internal/gensupport/error.go new file mode 100644 index 000000000..886c6532b --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/error.go @@ -0,0 +1,24 @@ +// Copyright 2022 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "errors" + + "github.com/googleapis/gax-go/v2/apierror" + "google.golang.org/api/googleapi" +) + +// WrapError creates an [apierror.APIError] from err, wraps it in err, and +// returns err. If err is not a [googleapi.Error] (or a +// [google.golang.org/grpc/status.Status]), it returns err without modification. +func WrapError(err error) error { + var herr *googleapi.Error + apiError, ok := apierror.ParseError(err, false) + if ok && errors.As(err, &herr) { + herr.Wrap(apiError) + } + return err +} diff --git a/vendor/google.golang.org/api/internal/gensupport/json.go b/vendor/google.golang.org/api/internal/gensupport/json.go index c01e32189..eab49a11e 100644 --- a/vendor/google.golang.org/api/internal/gensupport/json.go +++ b/vendor/google.golang.org/api/internal/gensupport/json.go @@ -13,9 +13,10 @@ import ( // MarshalJSON returns a JSON encoding of schema containing only selected fields. // A field is selected if any of the following is true: -// * it has a non-empty value -// * its field name is present in forceSendFields and it is not a nil pointer or nil interface -// * its field name is present in nullFields. +// - it has a non-empty value +// - its field name is present in forceSendFields and it is not a nil pointer or nil interface +// - its field name is present in nullFields. +// // The JSON key for each selected field is taken from the field's json: struct tag. func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { if len(forceSendFields) == 0 && len(nullFields) == 0 { @@ -85,7 +86,12 @@ func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNu if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil { ms, ok := v.Interface().(map[string]string) if !ok { - return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name) + mi, err := initMapSlow(v, f.Name, useNullMaps) + if err != nil { + return nil, err + } + m[tag.apiName] = mi + continue } mi := map[string]interface{}{} for k, v := range ms { @@ -119,6 +125,25 @@ func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNu return m, nil } +// initMapSlow uses reflection to build up a map object. This is slower than +// the default behavior so it should be used only as a fallback. 
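The practical effect of the new gensupport.WrapError plumbing above is that errors returned by the generated Do methods can now be unwrapped into the structured apierror.APIError as well as the familiar *googleapi.Error. A hedged fragment, continuing the first sketch with "errors", "google.golang.org/api/googleapi" and "github.com/googleapis/gax-go/v2/apierror" also imported; Reason() is taken from the gax apierror surface:

_, err = svc.ManagedZones.Get("my-project", "missing-zone").Context(ctx).Do()
if err != nil {
	var gerr *googleapi.Error
	if errors.As(err, &gerr) {
		fmt.Println("HTTP status:", gerr.Code)
	}
	// New in this revision: when the response body carries structured
	// details, the *googleapi.Error wraps an *apierror.APIError.
	var aerr *apierror.APIError
	if errors.As(err, &aerr) {
		fmt.Println("reason:", aerr.Reason())
	}
}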
+func initMapSlow(rv reflect.Value, fieldName string, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) { + mi := map[string]interface{}{} + iter := rv.MapRange() + for iter.Next() { + k, ok := iter.Key().Interface().(string) + if !ok { + return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]any", fieldName) + } + v := iter.Value().Interface() + mi[k] = v + } + for k := range useNullMaps[fieldName] { + mi[k] = nil + } + return mi, nil +} + // formatAsString returns a string representation of v, dereferencing it first if possible. func formatAsString(v reflect.Value, kind reflect.Kind) string { if kind == reflect.Ptr && !v.IsNil() { diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go index d14a22470..8356e7f27 100644 --- a/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -17,92 +17,10 @@ import ( "sync" "time" + gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/googleapi" ) -const sniffBuffSize = 512 - -func newContentSniffer(r io.Reader) *contentSniffer { - return &contentSniffer{r: r} -} - -// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. -type contentSniffer struct { - r io.Reader - start []byte // buffer for the sniffed bytes. - err error // set to any error encountered while reading bytes to be sniffed. - - ctype string // set on first sniff. - sniffed bool // set to true on first sniff. -} - -func (cs *contentSniffer) Read(p []byte) (n int, err error) { - // Ensure that the content type is sniffed before any data is consumed from Reader. - _, _ = cs.ContentType() - - if len(cs.start) > 0 { - n := copy(p, cs.start) - cs.start = cs.start[n:] - return n, nil - } - - // We may have read some bytes into start while sniffing, even if the read ended in an error. - // We should first return those bytes, then the error. - if cs.err != nil { - return 0, cs.err - } - - // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. - return cs.r.Read(p) -} - -// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. -func (cs *contentSniffer) ContentType() (string, bool) { - if cs.sniffed { - return cs.ctype, cs.ctype != "" - } - cs.sniffed = true - // If ReadAll hits EOF, it returns err==nil. - cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) - - // Don't try to detect the content type based on possibly incomplete data. - if cs.err != nil { - return "", false - } - - cs.ctype = http.DetectContentType(cs.start) - return cs.ctype, true -} - -// DetermineContentType determines the content type of the supplied reader. -// If the content type is already known, it can be specified via ctype. -// Otherwise, the content of media will be sniffed to determine the content type. -// If media implements googleapi.ContentTyper (deprecated), this will be used -// instead of sniffing the content. -// After calling DetectContentType the caller must not perform further reads on -// media, but rather read from the Reader that is returned. -func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { - // Note: callers could avoid calling DetectContentType if ctype != "", - // but doing the check inside this function reduces the amount of - // generated code. 
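The MarshalJSON change above (together with the new initMapSlow fallback) governs how the generated structs serialize: a field is emitted when it has a non-empty value, when it is named in ForceSendFields, or as an explicit null when named in NullFields. A small hedged illustration using the dns.ResourceRecordSet type from earlier; the exact JSON keys come from the struct's json tags:

rr := &dns.ResourceRecordSet{
	Name: "www.example.com.",
	Type: "A",
	Ttl:  0, // zero value would normally be omitted from the body

	ForceSendFields: []string{"Ttl"},     // force "ttl": 0 to be sent
	NullFields:      []string{"Rrdatas"}, // emit "rrdatas": null
}
b, err := rr.MarshalJSON()
if err != nil {
	log.Fatal(err)
}
fmt.Println(string(b)) // e.g. {"name":"www.example.com.","rrdatas":null,"ttl":0,"type":"A"}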
- if ctype != "" { - return media, ctype - } - - // For backwards compatibility, allow clients to set content - // type by providing a ContentTyper for media. - if typer, ok := media.(googleapi.ContentTyper); ok { - return media, typer.ContentType() - } - - sniffer := newContentSniffer(media) - if ctype, ok := sniffer.ContentType(); ok { - return sniffer, ctype - } - // If content type could not be sniffed, reads from sniffer will eventually fail with an error. - return sniffer, "" -} - type typeReader struct { io.Reader typ string @@ -234,7 +152,10 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { mi := &MediaInfo{} opts := googleapi.ProcessMediaOptions(options) if !opts.ForceEmptyContentType { - r, mi.mType = DetermineContentType(r, opts.ContentType) + mi.mType = opts.ContentType + if mi.mType == "" { + r, mi.mType = gax.DetermineContentType(r) + } } mi.chunkRetryDeadline = opts.ChunkRetryDeadline mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) @@ -245,7 +166,11 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { // call. It returns a MediaInfo using the given reader, size and media type. func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { rdr := ReaderAtToReader(r, size) - rdr, mType := DetermineContentType(rdr, mediaType) + mType := mediaType + if mType == "" { + rdr, mType = gax.DetermineContentType(rdr) + } + return &MediaInfo{ size: size, mType: mType, @@ -289,13 +214,12 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB // be retried because the data is stored in the MediaBuffer. media, _, _, _ = mi.buffer.Chunk() } + toCleanup := []io.Closer{} if media != nil { fb := readerFunc(body) fm := readerFunc(media) combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) - toCleanup := []io.Closer{ - combined, - } + toCleanup = append(toCleanup, combined) if fb != nil && fm != nil { getBody = func() (io.ReadCloser, error) { rb := ioutil.NopCloser(fb()) @@ -309,18 +233,30 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB return r, nil } } - cleanup = func() { - for _, closer := range toCleanup { - _ = closer.Close() - } - - } reqHeaders.Set("Content-Type", ctype) body = combined } if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { + // This happens when initiating a resumable upload session. + // The initial request contains a JSON body rather than media. + // It can be retried with a getBody function that re-creates the request body. + fb := readerFunc(body) + if fb != nil { + getBody = func() (io.ReadCloser, error) { + rb := ioutil.NopCloser(fb()) + toCleanup = append(toCleanup, rb) + return rb, nil + } + } reqHeaders.Set("X-Upload-Content-Type", mi.mType) } + // Ensure that any bodies created in getBody are cleaned up. + cleanup = func() { + for _, closer := range toCleanup { + _ = closer.Close() + } + + } return body, getBody, cleanup } diff --git a/vendor/google.golang.org/api/internal/gensupport/params.go b/vendor/google.golang.org/api/internal/gensupport/params.go index 6703721ff..1a30d2ca2 100644 --- a/vendor/google.golang.org/api/internal/gensupport/params.go +++ b/vendor/google.golang.org/api/internal/gensupport/params.go @@ -37,7 +37,7 @@ func (u URLParams) SetMulti(key string, values []string) { u[key] = values } -// Encode encodes the values into ``URL encoded'' form +// Encode encodes the values into “URL encoded” form // ("bar=baz&foo=quux") sorted by key. 
func (u URLParams) Encode() string { return url.Values(u).Encode() diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index 0c659188d..f168ea6d2 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -193,22 +193,27 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err // Each chunk gets its own initialized-at-zero backoff and invocation ID. bo := rx.Retry.backoff() - quitAfter := time.After(retryDeadline) + quitAfterTimer := time.NewTimer(retryDeadline) rx.attempts = 1 rx.invocationID = uuid.New().String() // Retry loop for a single chunk. for { + pauseTimer := time.NewTimer(pause) select { case <-ctx.Done(): + quitAfterTimer.Stop() + pauseTimer.Stop() if err == nil { err = ctx.Err() } return prepareReturn(resp, err) - case <-time.After(pause): - case <-quitAfter: + case <-pauseTimer.C: + case <-quitAfterTimer.C: + pauseTimer.Stop() return prepareReturn(resp, err) } + pauseTimer.Stop() // Check for context cancellation or timeout once more. If more than one // case in the select statement above was satisfied at the same time, Go @@ -217,11 +222,12 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err // canceled before or the timeout was reached. select { case <-ctx.Done(): + quitAfterTimer.Stop() if err == nil { err = ctx.Err() } return prepareReturn(resp, err) - case <-quitAfter: + case <-quitAfterTimer.C: return prepareReturn(resp, err) default: } @@ -235,6 +241,7 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err // Check if we should retry the request. if !errorFunc(status, err) { + quitAfterTimer.Stop() break } diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index 70a8e01c1..85c7bcbfd 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -17,6 +17,27 @@ import ( "github.com/googleapis/gax-go/v2" ) +// Use this error type to return an error which allows introspection of both +// the context error and the error from the service. +type wrappedCallErr struct { + ctxErr error + wrappedErr error +} + +func (e wrappedCallErr) Error() string { + return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr) +} + +func (e wrappedCallErr) Unwrap() error { + return e.wrappedErr +} + +// Is allows errors.Is to match the error from the call as well as context +// sentinel errors. +func (e wrappedCallErr) Is(target error) bool { + return errors.Is(e.ctxErr, target) || errors.Is(e.wrappedErr, target) +} + // SendRequest sends a single HTTP request using the given client. // If ctx is non-nil, it calls all hooks, then sends the request with // req.WithContext, then calls any functions returned by the hooks in @@ -94,15 +115,17 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r var errorFunc = retry.errorFunc() for { + t := time.NewTimer(pause) select { case <-ctx.Done(): - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err == nil { - err = ctx.Err() + t.Stop() + // If we got an error and the context has been canceled, return an error acknowledging + // both the context cancelation and the service error. 
+ if err != nil { + return resp, wrappedCallErr{ctx.Err(), err} } - return resp, err - case <-time.After(pause): + return resp, ctx.Err() + case <-t.C: } if ctx.Err() != nil { @@ -110,10 +133,10 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r // select is satisfied at the same time, Go will choose one arbitrarily. // That can cause an operation to go through even if the context was // canceled before. - if err == nil { - err = ctx.Err() + if err != nil { + return resp, wrappedCallErr{ctx.Err(), err} } - return resp, err + return resp, ctx.Err() } invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts) xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ") diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 046c62ec1..7a4f6d898 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.91.0" +const Version = "0.114.0" diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index 343a5a965..cc7ebfe27 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -134,3 +134,10 @@ type withCreds google.Credentials func (w *withCreds) Apply(o *internal.DialSettings) { o.InternalCredentials = (*google.Credentials)(w) } + +// EmbeddableAdapter is a no-op option.ClientOption that allow libraries to +// create their own client options by embedding this type into their own +// client-specific option wrapper. See example for usage. +type EmbeddableAdapter struct{} + +func (*EmbeddableAdapter) Apply(_ *internal.DialSettings) {} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index 60743c63e..b2085a194 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -82,6 +82,9 @@ func (w withEndpoint) Apply(o *internal.DialSettings) { // WithScopes returns a ClientOption that overrides the default OAuth2 scopes // to be used for a service. +// +// If both WithScopes and WithTokenSource are used, scope settings from the +// token source will be used instead. func WithScopes(scope ...string) ClientOption { return withScopes(scope) } @@ -93,7 +96,9 @@ func (w withScopes) Apply(o *internal.DialSettings) { copy(o.Scopes, w) } -// WithUserAgent returns a ClientOption that sets the User-Agent. +// WithUserAgent returns a ClientOption that sets the User-Agent. This option +// is incompatible with the [WithHTTPClient] option. If you wish to provide a +// custom client you will need to add this header via RoundTripper middleware. func WithUserAgent(ua string) ClientOption { return withUA(ua) } @@ -287,10 +292,10 @@ func (w withClientCertSource) Apply(o *internal.DialSettings) { // service account SA2 while using delegate service accounts DSA1 and DSA2, // the following must be true: // -// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on -// DSA1. -// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. -// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. +// 1. 
Base service account SA1 has roles/iam.serviceAccountTokenCreator on +// DSA1. +// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. +// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. // // The resulting impersonated credential will either have the default scopes of // the client being instantiating or the scopes from WithScopes if provided. @@ -305,9 +310,9 @@ func (w withClientCertSource) Apply(o *internal.DialSettings) { // // This is an EXPERIMENTAL API and may be changed or removed in the future. // -// This option has been replaced by `impersonate` package: +// Deprecated: This option has been replaced by `impersonate` package: // `google.golang.org/api/impersonate`. Please use the `impersonate` package -// instead. +// instead with the WithTokenSource option. func ImpersonateCredentials(target string, delegates ...string) ClientOption { return impersonateServiceAccount{ target: target, diff --git a/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json b/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json index 0d5bff208..a3174d8b4 100644 --- a/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json +++ b/vendor/google.golang.org/api/serviceusage/v1/serviceusage-api.json @@ -195,7 +195,7 @@ ] }, "list": { - "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", "flatPath": "v1/operations", "httpMethod": "GET", "id": "serviceusage.operations.list", @@ -426,7 +426,7 @@ } } }, - "revision": "20220615", + "revision": "20230309", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { "AdminQuotaPolicy": { @@ -501,11 +501,13 @@ "description": "The source syntax of the service.", "enum": [ "SYNTAX_PROTO2", - "SYNTAX_PROTO3" + "SYNTAX_PROTO3", + "SYNTAX_EDITIONS" ], "enumDescriptions": [ "Syntax `proto2`.", - "Syntax `proto3`." + "Syntax `proto3`.", + "Syntax `editions`." ], "type": "string" }, @@ -647,11 +649,23 @@ "description": "The JWT audience is used when generating a JWT ID token for the backend. This ID token will be added in the HTTP \"authorization\" header, and sent to the backend.", "type": "string" }, + "minDeadline": { + "description": "Deprecated, do not use.", + "format": "double", + "type": "number" + }, "operationDeadline": { "description": "The number of seconds to wait for the completion of a long running operation. 
The default is no deadline.", "format": "double", "type": "number" }, + "overridesByRequestProtocol": { + "additionalProperties": { + "$ref": "BackendRule" + }, + "description": "The map between request protocol and the backend address.", + "type": "object" + }, "pathTranslation": { "enum": [ "PATH_TRANSLATION_UNSPECIFIED", @@ -791,6 +805,105 @@ "properties": {}, "type": "object" }, + "ClientLibrarySettings": { + "description": "Details about how and where to publish client libraries.", + "id": "ClientLibrarySettings", + "properties": { + "cppSettings": { + "$ref": "CppSettings", + "description": "Settings for C++ client libraries." + }, + "dotnetSettings": { + "$ref": "DotnetSettings", + "description": "Settings for .NET client libraries." + }, + "goSettings": { + "$ref": "GoSettings", + "description": "Settings for Go client libraries." + }, + "javaSettings": { + "$ref": "JavaSettings", + "description": "Settings for legacy Java features, supported in the Service YAML." + }, + "launchStage": { + "description": "Launch stage of this version of the API.", + "enum": [ + "LAUNCH_STAGE_UNSPECIFIED", + "UNIMPLEMENTED", + "PRELAUNCH", + "EARLY_ACCESS", + "ALPHA", + "BETA", + "GA", + "DEPRECATED" + ], + "enumDescriptions": [ + "Do not use this default value.", + "The feature is not yet implemented. Users can not use it.", + "Prelaunch features are hidden from users and are only visible internally.", + "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects allowlisted. Alpha releases don't have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. Beta releases are suitable for limited production use cases.", + "GA features are open to all developers and are considered stable and fully qualified for production use.", + "Deprecated features are scheduled to be shut down and removed. For more information, see the \"Deprecation Policy\" section of our [Terms of Service](https://cloud.google.com/terms/) and the [Google Cloud Platform Subject to the Deprecation Policy](https://cloud.google.com/terms/deprecation) documentation." + ], + "type": "string" + }, + "nodeSettings": { + "$ref": "NodeSettings", + "description": "Settings for Node client libraries." + }, + "phpSettings": { + "$ref": "PhpSettings", + "description": "Settings for PHP client libraries." + }, + "pythonSettings": { + "$ref": "PythonSettings", + "description": "Settings for Python client libraries." 
+ }, + "restNumericEnums": { + "description": "When using transport=rest, the client request will encode enums as numbers rather than strings.", + "type": "boolean" + }, + "rubySettings": { + "$ref": "RubySettings", + "description": "Settings for Ruby client libraries." + }, + "version": { + "description": "Version of the API to apply these settings to.", + "type": "string" + } + }, + "type": "object" + }, + "CommonLanguageSettings": { + "description": "Required information for every language.", + "id": "CommonLanguageSettings", + "properties": { + "destinations": { + "description": "The destination where API teams want this client library to be published.", + "items": { + "enum": [ + "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED", + "GITHUB", + "PACKAGE_MANAGER" + ], + "enumDescriptions": [ + "Client libraries will neither be generated nor published to package managers.", + "Generate the client library in a repo under github.com/googleapis, but don't publish it to package managers.", + "Publish the library to package managers like nuget.org and npmjs.com." + ], + "type": "string" + }, + "type": "array" + }, + "referenceDocsUri": { + "description": "Link to automatically generated reference documentation. Example: https://cloud.google.com/nodejs/docs/reference/asset/latest", + "type": "string" + } + }, + "type": "object" + }, "Context": { "description": "`Context` defines which contexts an API requests. Example: context: rules: - selector: \"*\" requested: - google.rpc.context.ProjectContext - google.rpc.context.OriginContext The above specifies that all methods in the API request `google.rpc.context.ProjectContext` and `google.rpc.context.OriginContext`. Available context types are defined in package `google.rpc.context`. This also provides mechanism to allowlist any protobuf message extension that can be sent in grpc metadata using “x-goog-ext--bin” and “x-goog-ext--jspb” format. For example, list any service specific protobuf types that can appear in grpc metadata as follows in your yaml file: Example: context: rules: - selector: \"google.example.library.v1.LibraryService.CreateBook\" allowed_request_extensions: - google.foo.v1.NewExtension allowed_response_extensions: - google.foo.v1.NewExtension You can also specify extension ID instead of fully qualified extension name here.", "id": "Context", @@ -845,7 +958,7 @@ "type": "object" }, "Control": { - "description": "Selects and configures the service controller used by the service. The service controller handles two things: - **What is allowed:** for each API request, Chemist checks the project status, activation status, abuse status, billing status, service status, location restrictions, VPC Service Controls, SuperQuota, and other policies. - **What has happened:** for each API response, Chemist reports the telemetry data to analytics, auditing, billing, eventing, logging, monitoring, sawmill, and tracing. Chemist also accepts telemetry data not associated with API traffic, such as billing metrics. Example: control: environment: servicecontrol.googleapis.com", + "description": "Selects and configures the service controller used by the service. Example: control: environment: servicecontrol.googleapis.com", "id": "Control", "properties": { "environment": { @@ -855,6 +968,17 @@ }, "type": "object" }, + "CppSettings": { + "description": "Settings for C++ client libraries.", + "id": "CppSettings", + "properties": { + "common": { + "$ref": "CommonLanguageSettings", + "description": "Some settings." 
+ } + }, + "type": "object" + }, "CreateAdminQuotaPolicyMetadata": { "description": "Metadata message that provides information such as progress, partial failures, and similar information on each GetOperation call of LRO returned by CreateAdminQuotaPolicy.", "id": "CreateAdminQuotaPolicyMetadata", @@ -1010,6 +1134,17 @@ }, "type": "object" }, + "DotnetSettings": { + "description": "Settings for Dotnet client libraries.", + "id": "DotnetSettings", + "properties": { + "common": { + "$ref": "CommonLanguageSettings", + "description": "Some settings." + } + }, + "type": "object" + }, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", "id": "Empty", @@ -1052,6 +1187,13 @@ "description": "`Endpoint` describes a network address of a service that serves a set of APIs. It is commonly known as a service endpoint. A service may expose any number of service endpoints, and all service endpoints share the same service definition, such as quota limits and monitoring metrics. Example: type: google.api.Service name: library-example.googleapis.com endpoints: # Declares network address `https://library-example.googleapis.com` # for service `library-example.googleapis.com`. The `https` scheme # is implicit for all service endpoints. Other schemes may be # supported in the future. - name: library-example.googleapis.com allow_cors: false - name: content-staging-library-example.googleapis.com # Allows HTTP OPTIONS calls to be passed to the API frontend, for it # to decide whether the subsequent cross-origin request is allowed # to proceed. allow_cors: true", "id": "Endpoint", "properties": { + "aliases": { + "description": "Unimplemented. Dot not use. DEPRECATED: This field is no longer supported. Instead of using aliases, please specify multiple google.api.Endpoint for each of the intended aliases. Additional names that this endpoint will be hosted on.", + "items": { + "type": "string" + }, + "type": "array" + }, "allowCors": { "description": "Allowing [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka cross-domain traffic, would allow the backends served from this endpoint to receive and respond to HTTP OPTIONS requests. The response will be used by the browser to determine whether the subsequent cross-origin request is allowed to proceed.", "type": "boolean" @@ -1071,6 +1213,10 @@ "description": "Enum type definition.", "id": "Enum", "properties": { + "edition": { + "description": "The source edition string, only valid when syntax is SYNTAX_EDITIONS.", + "type": "string" + }, "enumvalue": { "description": "Enum value definitions.", "items": { @@ -1097,11 +1243,13 @@ "description": "The source syntax.", "enum": [ "SYNTAX_PROTO2", - "SYNTAX_PROTO3" + "SYNTAX_PROTO3", + "SYNTAX_EDITIONS" ], "enumDescriptions": [ "Syntax `proto2`.", - "Syntax `proto3`." + "Syntax `proto3`.", + "Syntax `editions`." ], "type": "string" } @@ -1266,6 +1414,17 @@ }, "type": "object" }, + "GoSettings": { + "description": "Settings for Go client libraries.", + "id": "GoSettings", + "properties": { + "common": { + "$ref": "CommonLanguageSettings", + "description": "Some settings." + } + }, + "type": "object" + }, "GoogleApiService": { "description": "`Service` is the root object of Google API service configuration (service config). 
It describes the basic information about a logical service, such as the service name and the user-facing title, and delegates other aspects to sub-sections. Each sub-section is either a proto message or a repeated proto message that configures a specific aspect, such as auth. For more information, see each proto message definition. Example: type: google.api.Service name: calendar.googleapis.com title: Google Calendar API apis: - name: google.calendar.v3.Calendar visibility: rules: - selector: \"google.calendar.v3.*\" restriction: PREVIEW backend: rules: - selector: \"google.calendar.v3.*\" address: calendar.example.com authentication: providers: - id: google_calendar_auth jwks_uri: https://www.googleapis.com/oauth2/v1/certs issuer: https://securetoken.google.com rules: - selector: \"*\" requirements: provider_id: google_calendar_auth", "id": "GoogleApiService", @@ -1369,6 +1528,10 @@ "description": "The Google project that owns this service.", "type": "string" }, + "publishing": { + "$ref": "Publishing", + "description": "Settings for [Google Cloud Client libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) generated from APIs defined as protocol buffers." + }, "quota": { "$ref": "Quota", "description": "Quota configuration." @@ -1675,6 +1838,28 @@ }, "type": "object" }, + "JavaSettings": { + "description": "Settings for Java client libraries.", + "id": "JavaSettings", + "properties": { + "common": { + "$ref": "CommonLanguageSettings", + "description": "Some settings." + }, + "libraryPackage": { + "description": "The package name to use in Java. Clobbers the java_package option set in the protobuf. This should be used **only** by APIs who have already set the language_settings.java.package_name\" field in gapic.yaml. API teams should use the protobuf java_package option where possible. Example of a YAML configuration:: publishing: java_settings: library_package: com.google.cloud.pubsub.v1", + "type": "string" + }, + "serviceClassNames": { + "additionalProperties": { + "type": "string" + }, + "description": "Configure the Java class name to use instead of the service's for its corresponding generated GAPIC client. Keys are fully-qualified service names as they appear in the protobuf (including the full the language_settings.java.interface_names\" field in gapic.yaml. API teams should otherwise use the service name as it appears in the protobuf. Example of a YAML configuration:: publishing: java_settings: service_class_names: - google.pubsub.v1.Publisher: TopicAdmin - google.pubsub.v1.Subscriber: SubscriptionAdmin", + "type": "object" + } + }, + "type": "object" + }, "JwtLocation": { "description": "Specifies a location to extract JWT from an API request.", "id": "JwtLocation", @@ -1828,6 +2013,33 @@ }, "type": "object" }, + "LongRunning": { + "description": "Describes settings to use when generating API methods that use the long-running operation pattern. All default values below are from those used in the client library generators (e.g. [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)).", + "id": "LongRunning", + "properties": { + "initialPollDelay": { + "description": "Initial delay after which the first poll request will be made. Default value: 5 seconds.", + "format": "google-duration", + "type": "string" + }, + "maxPollDelay": { + "description": "Maximum time between two subsequent poll requests. 
Default value: 45 seconds.", + "format": "google-duration", + "type": "string" + }, + "pollDelayMultiplier": { + "description": "Multiplier to gradually increase delay between subsequent polls until it reaches max_poll_delay. Default value: 1.5.", + "format": "float", + "type": "number" + }, + "totalPollTimeout": { + "description": "Total polling timeout. Default value: 5 minutes.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, "Method": { "description": "Method represents a method of an API interface.", "id": "Method", @@ -1863,17 +2075,34 @@ "description": "The source syntax of this method.", "enum": [ "SYNTAX_PROTO2", - "SYNTAX_PROTO3" + "SYNTAX_PROTO3", + "SYNTAX_EDITIONS" ], "enumDescriptions": [ "Syntax `proto2`.", - "Syntax `proto3`." + "Syntax `proto3`.", + "Syntax `editions`." ], "type": "string" } }, "type": "object" }, + "MethodSettings": { + "description": "Describes the generator configuration for a method.", + "id": "MethodSettings", + "properties": { + "longRunning": { + "$ref": "LongRunning", + "description": "Describes settings to use for long-running operations when generating API methods for RPCs. Complements RPCs that use the annotations in google/longrunning/operations.proto. Example of a YAML configuration:: publishing: method_behavior: - selector: CreateAdDomain long_running: initial_poll_delay: seconds: 60 # 1 minute poll_delay_multiplier: 1.5 max_poll_delay: seconds: 360 # 6 minutes total_poll_timeout: seconds: 54000 # 90 minutes" + }, + "selector": { + "description": "The fully qualified name of the method, for which the options below apply. This is used to find the method to apply the options.", + "type": "string" + } + }, + "type": "object" + }, "MetricDescriptor": { "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable. ", "id": "MetricDescriptor", @@ -2149,6 +2378,17 @@ }, "type": "object" }, + "NodeSettings": { + "description": "Settings for Node client libraries.", + "id": "NodeSettings", + "properties": { + "common": { + "$ref": "CommonLanguageSettings", + "description": "Some settings." + } + }, + "type": "object" + }, "OAuthRequirements": { "description": "OAuth scopes are a way to define data and permissions on data. For example, there are scopes defined for \"Read-only access to Google Calendar\" and \"Access to Cloud Platform\". Users can consent to a scope for an application, giving it permission to access that data on their behalf. OAuth scope specifications should be fairly coarse grained; a user will need to see and understand the text description of what your scope means. In most cases: use one or at most two OAuth scopes for an entire family of products. If your product has multiple APIs, you should probably be sharing the OAuth scope across all of those APIs. When you need finer grained OAuth consent screens: talk with your product management about how developers will use them in practice. Please note that even though each of the canonical scopes is enough for a request to be accepted and passed to the backend, a request can still fail due to the backend requiring additional scopes or permissions.", "id": "OAuthRequirements", @@ -2250,19 +2490,111 @@ }, "type": "object" }, + "PhpSettings": { + "description": "Settings for Php client libraries.", + "id": "PhpSettings", + "properties": { + "common": { + "$ref": "CommonLanguageSettings", + "description": "Some settings." 
+ } + }, + "type": "object" + }, + "Publishing": { + "description": "This message configures the settings for publishing [Google Cloud Client libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) generated from the service config.", + "id": "Publishing", + "properties": { + "apiShortName": { + "description": "Used as a tracking tag when collecting data about the APIs developer relations artifacts like docs, packages delivered to package managers, etc. Example: \"speech\".", + "type": "string" + }, + "codeownerGithubTeams": { + "description": "GitHub teams to be added to CODEOWNERS in the directory in GitHub containing source code for the client libraries for this API.", + "items": { + "type": "string" + }, + "type": "array" + }, + "docTagPrefix": { + "description": "A prefix used in sample code when demarking regions to be included in documentation.", + "type": "string" + }, + "documentationUri": { + "description": "Link to product home page. Example: https://cloud.google.com/asset-inventory/docs/overview", + "type": "string" + }, + "githubLabel": { + "description": "GitHub label to apply to issues and pull requests opened for this API.", + "type": "string" + }, + "librarySettings": { + "description": "Client library settings. If the same version string appears multiple times in this list, then the last one wins. Settings from earlier settings with the same version string are discarded.", + "items": { + "$ref": "ClientLibrarySettings" + }, + "type": "array" + }, + "methodSettings": { + "description": "A list of API method settings, e.g. the behavior for methods that use the long-running operation pattern.", + "items": { + "$ref": "MethodSettings" + }, + "type": "array" + }, + "newIssueUri": { + "description": "Link to a place that API users can report issues. Example: https://issuetracker.google.com/issues/new?component=190865\u0026template=1161103", + "type": "string" + }, + "organization": { + "description": "For whom the client library is being published.", + "enum": [ + "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED", + "CLOUD", + "ADS", + "PHOTOS", + "STREET_VIEW" + ], + "enumDescriptions": [ + "Not useful.", + "Google Cloud Platform Org.", + "Ads (Advertising) Org.", + "Photos Org.", + "Street View Org." + ], + "type": "string" + }, + "protoReferenceDocumentationUri": { + "description": "Optional link to proto reference documentation. Example: https://cloud.google.com/pubsub/lite/docs/reference/rpc", + "type": "string" + } + }, + "type": "object" + }, + "PythonSettings": { + "description": "Settings for Python client libraries.", + "id": "PythonSettings", + "properties": { + "common": { + "$ref": "CommonLanguageSettings", + "description": "Some settings." + } + }, + "type": "object" + }, "Quota": { - "description": "Quota configuration helps to achieve fairness and budgeting in service usage. The metric based quota configuration works this way: - The service configuration defines a set of metrics. - For API calls, the quota.metric_rules maps methods to metrics with corresponding costs. - The quota.limits defines limits on the metrics, which will be used for quota checks at runtime. An example quota configuration in yaml format: quota: limits: - name: apiWriteQpsPerProject metric: library.googleapis.com/write_calls unit: \"1/min/{project}\" # rate limit for consumer projects values: STANDARD: 10000 # The metric rules bind all methods to the read_calls metric, # except for the UpdateBook and DeleteBook methods. 
These two methods # are mapped to the write_calls metric, with the UpdateBook method # consuming at twice rate as the DeleteBook method. metric_rules: - selector: \"*\" metric_costs: library.googleapis.com/read_calls: 1 - selector: google.example.library.v1.LibraryService.UpdateBook metric_costs: library.googleapis.com/write_calls: 2 - selector: google.example.library.v1.LibraryService.DeleteBook metric_costs: library.googleapis.com/write_calls: 1 Corresponding Metric definition: metrics: - name: library.googleapis.com/read_calls display_name: Read requests metric_kind: DELTA value_type: INT64 - name: library.googleapis.com/write_calls display_name: Write requests metric_kind: DELTA value_type: INT64 ", + "description": "Quota configuration helps to achieve fairness and budgeting in service usage. The metric based quota configuration works this way: - The service configuration defines a set of metrics. - For API calls, the quota.metric_rules maps methods to metrics with corresponding costs. - The quota.limits defines limits on the metrics, which will be used for quota checks at runtime. An example quota configuration in yaml format: quota: limits: - name: apiWriteQpsPerProject metric: library.googleapis.com/write_calls unit: \"1/min/{project}\" # rate limit for consumer projects values: STANDARD: 10000 (The metric rules bind all methods to the read_calls metric, except for the UpdateBook and DeleteBook methods. These two methods are mapped to the write_calls metric, with the UpdateBook method consuming at twice rate as the DeleteBook method.) metric_rules: - selector: \"*\" metric_costs: library.googleapis.com/read_calls: 1 - selector: google.example.library.v1.LibraryService.UpdateBook metric_costs: library.googleapis.com/write_calls: 2 - selector: google.example.library.v1.LibraryService.DeleteBook metric_costs: library.googleapis.com/write_calls: 1 Corresponding Metric definition: metrics: - name: library.googleapis.com/read_calls display_name: Read requests metric_kind: DELTA value_type: INT64 - name: library.googleapis.com/write_calls display_name: Write requests metric_kind: DELTA value_type: INT64 ", "id": "Quota", "properties": { "limits": { - "description": "List of `QuotaLimit` definitions for the service.", + "description": "List of QuotaLimit definitions for the service.", "items": { "$ref": "QuotaLimit" }, "type": "array" }, "metricRules": { - "description": "List of `MetricRule` definitions, each one mapping a selected method to one or more metrics.", + "description": "List of MetricRule definitions, each one mapping a selected method to one or more metrics.", "items": { "$ref": "MetricRule" }, @@ -2360,6 +2692,17 @@ }, "type": "object" }, + "RubySettings": { + "description": "Settings for Ruby client libraries.", + "id": "RubySettings", + "properties": { + "common": { + "$ref": "CommonLanguageSettings", + "description": "Some settings." + } + }, + "type": "object" + }, "ServiceIdentity": { "description": "Service identity for a service. 
This is the identity that service producer should use to access consumer resources.", "id": "ServiceIdentity", @@ -2486,6 +2829,10 @@ "description": "A protocol buffer message type.", "id": "Type", "properties": { + "edition": { + "description": "The source edition string, only valid when syntax is SYNTAX_EDITIONS.", + "type": "string" + }, "fields": { "description": "The list of fields.", "items": { @@ -2519,11 +2866,13 @@ "description": "The source syntax.", "enum": [ "SYNTAX_PROTO2", - "SYNTAX_PROTO3" + "SYNTAX_PROTO3", + "SYNTAX_EDITIONS" ], "enumDescriptions": [ "Syntax `proto2`.", - "Syntax `proto3`." + "Syntax `proto3`.", + "Syntax `editions`." ], "type": "string" } diff --git a/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go b/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go index 2f69383fd..a713d49b7 100644 --- a/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go +++ b/vendor/google.golang.org/api/serviceusage/v1/serviceusage-gen.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC. +// Copyright 2023 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,35 +8,35 @@ // // For product documentation, see: https://cloud.google.com/service-usage/ // -// Creating a client +// # Creating a client // // Usage example: // -// import "google.golang.org/api/serviceusage/v1" -// ... -// ctx := context.Background() -// serviceusageService, err := serviceusage.NewService(ctx) +// import "google.golang.org/api/serviceusage/v1" +// ... +// ctx := context.Background() +// serviceusageService, err := serviceusage.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // -// Other authentication options +// # Other authentication options // // By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: // -// serviceusageService, err := serviceusage.NewService(ctx, option.WithScopes(serviceusage.ServiceManagementScope)) +// serviceusageService, err := serviceusage.NewService(ctx, option.WithScopes(serviceusage.ServiceManagementScope)) // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // -// serviceusageService, err := serviceusage.NewService(ctx, option.WithAPIKey("AIza...")) +// serviceusageService, err := serviceusage.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// serviceusageService, err := serviceusage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// serviceusageService, err := serviceusage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package serviceusage // import "google.golang.org/api/serviceusage/v1" @@ -264,6 +264,7 @@ type Api struct { // Possible values: // "SYNTAX_PROTO2" - Syntax `proto2`. // "SYNTAX_PROTO3" - Syntax `proto3`. + // "SYNTAX_EDITIONS" - Syntax `editions`. 
Syntax string `json:"syntax,omitempty"` // Version: A version string for this interface. If specified, must have @@ -582,10 +583,17 @@ type BackendRule struct { // "authorization" header, and sent to the backend. JwtAudience string `json:"jwtAudience,omitempty"` + // MinDeadline: Deprecated, do not use. + MinDeadline float64 `json:"minDeadline,omitempty"` + // OperationDeadline: The number of seconds to wait for the completion // of a long running operation. The default is no deadline. OperationDeadline float64 `json:"operationDeadline,omitempty"` + // OverridesByRequestProtocol: The map between request protocol and the + // backend address. + OverridesByRequestProtocol map[string]BackendRule `json:"overridesByRequestProtocol,omitempty"` + // Possible values: // "PATH_TRANSLATION_UNSPECIFIED" // "CONSTANT_ADDRESS" - Use the backend address as-is, with no @@ -657,6 +665,7 @@ func (s *BackendRule) UnmarshalJSON(data []byte) error { type NoMethod BackendRule var s1 struct { Deadline gensupport.JSONFloat64 `json:"deadline"` + MinDeadline gensupport.JSONFloat64 `json:"minDeadline"` OperationDeadline gensupport.JSONFloat64 `json:"operationDeadline"` *NoMethod } @@ -665,6 +674,7 @@ func (s *BackendRule) UnmarshalJSON(data []byte) error { return err } s.Deadline = float64(s1.Deadline) + s.MinDeadline = float64(s1.MinDeadline) s.OperationDeadline = float64(s1.OperationDeadline) return nil } @@ -918,6 +928,143 @@ func (s *BillingDestination) MarshalJSON() ([]byte, error) { type CancelOperationRequest struct { } +// ClientLibrarySettings: Details about how and where to publish client +// libraries. +type ClientLibrarySettings struct { + // CppSettings: Settings for C++ client libraries. + CppSettings *CppSettings `json:"cppSettings,omitempty"` + + // DotnetSettings: Settings for .NET client libraries. + DotnetSettings *DotnetSettings `json:"dotnetSettings,omitempty"` + + // GoSettings: Settings for Go client libraries. + GoSettings *GoSettings `json:"goSettings,omitempty"` + + // JavaSettings: Settings for legacy Java features, supported in the + // Service YAML. + JavaSettings *JavaSettings `json:"javaSettings,omitempty"` + + // LaunchStage: Launch stage of this version of the API. + // + // Possible values: + // "LAUNCH_STAGE_UNSPECIFIED" - Do not use this default value. + // "UNIMPLEMENTED" - The feature is not yet implemented. Users can not + // use it. + // "PRELAUNCH" - Prelaunch features are hidden from users and are only + // visible internally. + // "EARLY_ACCESS" - Early Access features are limited to a closed + // group of testers. To use these features, you must sign up in advance + // and sign a Trusted Tester agreement (which includes confidentiality + // provisions). These features may be unstable, changed in + // backward-incompatible ways, and are not guaranteed to be released. + // "ALPHA" - Alpha is a limited availability test for releases before + // they are cleared for widespread use. By Alpha, all significant design + // issues are resolved and we are in the process of verifying + // functionality. Alpha customers need to apply for access, agree to + // applicable terms, and have their projects allowlisted. Alpha releases + // don't have to be feature complete, no SLAs are provided, and there + // are no technical support obligations, but they will be far enough + // along that customers can actually use them in test environments or + // for limited-use tests -- just like they would in normal production + // cases. 
+ // "BETA" - Beta is the point at which we are ready to open a release + // for any customer to use. There are no SLA or technical support + // obligations in a Beta release. Products will be complete from a + // feature perspective, but may have some open outstanding issues. Beta + // releases are suitable for limited production use cases. + // "GA" - GA features are open to all developers and are considered + // stable and fully qualified for production use. + // "DEPRECATED" - Deprecated features are scheduled to be shut down + // and removed. For more information, see the "Deprecation Policy" + // section of our [Terms of Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation + // Policy](https://cloud.google.com/terms/deprecation) documentation. + LaunchStage string `json:"launchStage,omitempty"` + + // NodeSettings: Settings for Node client libraries. + NodeSettings *NodeSettings `json:"nodeSettings,omitempty"` + + // PhpSettings: Settings for PHP client libraries. + PhpSettings *PhpSettings `json:"phpSettings,omitempty"` + + // PythonSettings: Settings for Python client libraries. + PythonSettings *PythonSettings `json:"pythonSettings,omitempty"` + + // RestNumericEnums: When using transport=rest, the client request will + // encode enums as numbers rather than strings. + RestNumericEnums bool `json:"restNumericEnums,omitempty"` + + // RubySettings: Settings for Ruby client libraries. + RubySettings *RubySettings `json:"rubySettings,omitempty"` + + // Version: Version of the API to apply these settings to. + Version string `json:"version,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CppSettings") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CppSettings") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ClientLibrarySettings) MarshalJSON() ([]byte, error) { + type NoMethod ClientLibrarySettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CommonLanguageSettings: Required information for every language. +type CommonLanguageSettings struct { + // Destinations: The destination where API teams want this client + // library to be published. + // + // Possible values: + // "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED" - Client libraries will + // neither be generated nor published to package managers. + // "GITHUB" - Generate the client library in a repo under + // github.com/googleapis, but don't publish it to package managers. + // "PACKAGE_MANAGER" - Publish the library to package managers like + // nuget.org and npmjs.com. + Destinations []string `json:"destinations,omitempty"` + + // ReferenceDocsUri: Link to automatically generated reference + // documentation. 
Example: + // https://cloud.google.com/nodejs/docs/reference/asset/latest + ReferenceDocsUri string `json:"referenceDocsUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Destinations") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Destinations") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommonLanguageSettings) MarshalJSON() ([]byte, error) { + type NoMethod CommonLanguageSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Context: `Context` defines which contexts an API requests. Example: // context: rules: - selector: "*" requested: - // google.rpc.context.ProjectContext - google.rpc.context.OriginContext @@ -1011,15 +1158,7 @@ func (s *ContextRule) MarshalJSON() ([]byte, error) { } // Control: Selects and configures the service controller used by the -// service. The service controller handles two things: - **What is -// allowed:** for each API request, Chemist checks the project status, -// activation status, abuse status, billing status, service status, -// location restrictions, VPC Service Controls, SuperQuota, and other -// policies. - **What has happened:** for each API response, Chemist -// reports the telemetry data to analytics, auditing, billing, eventing, -// logging, monitoring, sawmill, and tracing. Chemist also accepts -// telemetry data not associated with API traffic, such as billing -// metrics. Example: control: environment: servicecontrol.googleapis.com +// service. Example: control: environment: servicecontrol.googleapis.com type Control struct { // Environment: The service controller environment to use. If empty, no // control plane feature (like quota and billing) will be enabled. The @@ -1049,6 +1188,34 @@ func (s *Control) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CppSettings: Settings for C++ client libraries. +type CppSettings struct { + // Common: Some settings. + Common *CommonLanguageSettings `json:"common,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Common") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Common") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CppSettings) MarshalJSON() ([]byte, error) { + type NoMethod CppSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // CreateAdminQuotaPolicyMetadata: Metadata message that provides // information such as progress, partial failures, and similar // information on each GetOperation call of LRO returned by @@ -1380,6 +1547,34 @@ func (s *DocumentationRule) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DotnetSettings: Settings for Dotnet client libraries. +type DotnetSettings struct { + // Common: Some settings. + Common *CommonLanguageSettings `json:"common,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Common") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Common") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DotnetSettings) MarshalJSON() ([]byte, error) { + type NoMethod DotnetSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Empty: A generic empty message that you can re-use to avoid defining // duplicated empty messages in your APIs. A typical example is to use // it as the request or the response type of an API method. For @@ -1472,6 +1667,12 @@ func (s *EnableServiceResponse) MarshalJSON() ([]byte, error) { // whether the subsequent cross-origin request is allowed # to proceed. // allow_cors: true type Endpoint struct { + // Aliases: Unimplemented. Dot not use. DEPRECATED: This field is no + // longer supported. Instead of using aliases, please specify multiple + // google.api.Endpoint for each of the intended aliases. Additional + // names that this endpoint will be hosted on. + Aliases []string `json:"aliases,omitempty"` + // AllowCors: Allowing CORS // (https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka // cross-domain traffic, would allow the backends served from this @@ -1490,7 +1691,7 @@ type Endpoint struct { // "8.8.8.8" or "myservice.appspot.com". Target string `json:"target,omitempty"` - // ForceSendFields is a list of field names (e.g. "AllowCors") to + // ForceSendFields is a list of field names (e.g. "Aliases") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -1498,7 +1699,7 @@ type Endpoint struct { // This may be used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AllowCors") to include in + // NullFields is a list of field names (e.g. "Aliases") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -1515,6 +1716,10 @@ func (s *Endpoint) MarshalJSON() ([]byte, error) { // Enum: Enum type definition. type Enum struct { + // Edition: The source edition string, only valid when syntax is + // SYNTAX_EDITIONS. + Edition string `json:"edition,omitempty"` + // Enumvalue: Enum value definitions. Enumvalue []*EnumValue `json:"enumvalue,omitempty"` @@ -1532,9 +1737,10 @@ type Enum struct { // Possible values: // "SYNTAX_PROTO2" - Syntax `proto2`. // "SYNTAX_PROTO3" - Syntax `proto3`. + // "SYNTAX_EDITIONS" - Syntax `editions`. Syntax string `json:"syntax,omitempty"` - // ForceSendFields is a list of field names (e.g. "Enumvalue") to + // ForceSendFields is a list of field names (e.g. "Edition") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -1542,7 +1748,7 @@ type Enum struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Enumvalue") to include in + // NullFields is a list of field names (e.g. "Edition") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -1724,6 +1930,34 @@ func (s *GetServiceIdentityResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GoSettings: Settings for Go client libraries. +type GoSettings struct { + // Common: Some settings. + Common *CommonLanguageSettings `json:"common,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Common") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Common") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoSettings) MarshalJSON() ([]byte, error) { + type NoMethod GoSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GoogleApiService: `Service` is the root object of Google API service // configuration (service config). 
It describes the basic information // about a logical service, such as the service name and the user-facing @@ -1820,6 +2054,11 @@ type GoogleApiService struct { // ProducerProjectId: The Google project that owns this service. ProducerProjectId string `json:"producerProjectId,omitempty"` + // Publishing: Settings for Google Cloud Client libraries + // (https://cloud.google.com/apis/docs/cloud-client-libraries) generated + // from APIs defined as protocol buffers. + Publishing *Publishing `json:"publishing,omitempty"` + // Quota: Quota configuration. Quota *Quota `json:"quota,omitempty"` @@ -2473,6 +2712,53 @@ func (s *ImportConsumerOverridesResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// JavaSettings: Settings for Java client libraries. +type JavaSettings struct { + // Common: Some settings. + Common *CommonLanguageSettings `json:"common,omitempty"` + + // LibraryPackage: The package name to use in Java. Clobbers the + // java_package option set in the protobuf. This should be used **only** + // by APIs who have already set the language_settings.java.package_name" + // field in gapic.yaml. API teams should use the protobuf java_package + // option where possible. Example of a YAML configuration:: publishing: + // java_settings: library_package: com.google.cloud.pubsub.v1 + LibraryPackage string `json:"libraryPackage,omitempty"` + + // ServiceClassNames: Configure the Java class name to use instead of + // the service's for its corresponding generated GAPIC client. Keys are + // fully-qualified service names as they appear in the protobuf + // (including the full the language_settings.java.interface_names" field + // in gapic.yaml. API teams should otherwise use the service name as it + // appears in the protobuf. Example of a YAML configuration:: + // publishing: java_settings: service_class_names: - + // google.pubsub.v1.Publisher: TopicAdmin - google.pubsub.v1.Subscriber: + // SubscriptionAdmin + ServiceClassNames map[string]string `json:"serviceClassNames,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Common") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Common") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *JavaSettings) MarshalJSON() ([]byte, error) { + type NoMethod JavaSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // JwtLocation: Specifies a location to extract JWT from an API request. type JwtLocation struct { // Cookie: Specifies cookie name to extract JWT token. 
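The generated settings types added in the hunks above (CppSettings, DotnetSettings, GoSettings, JavaSettings) all share the ForceSendFields/NullFields marshaling convention described in their field comments. A minimal sketch of what that convention means for a caller, assuming this generated file belongs to the google.golang.org/api/serviceusage/v1 package (the file header is not visible in this part of the diff) and hedging the exact JSON key order:

package main

import (
	"encoding/json"
	"fmt"

	serviceusage "google.golang.org/api/serviceusage/v1"
)

func main() {
	s := &serviceusage.JavaSettings{
		LibraryPackage: "com.google.cloud.pubsub.v1",
		// Common is unset, so it would normally be omitted from the request
		// body; listing it in NullFields sends it as an explicit JSON null.
		NullFields: []string{"Common"},
	}
	b, err := json.Marshal(s) // dispatches to the generated MarshalJSON shown above
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// Prints something like:
	// {"common":null,"libraryPackage":"com.google.cloud.pubsub.v1"}
}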
@@ -2763,6 +3049,64 @@ func (s *LoggingDestination) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// LongRunning: Describes settings to use when generating API methods +// that use the long-running operation pattern. All default values below +// are from those used in the client library generators (e.g. Java +// (https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). +type LongRunning struct { + // InitialPollDelay: Initial delay after which the first poll request + // will be made. Default value: 5 seconds. + InitialPollDelay string `json:"initialPollDelay,omitempty"` + + // MaxPollDelay: Maximum time between two subsequent poll requests. + // Default value: 45 seconds. + MaxPollDelay string `json:"maxPollDelay,omitempty"` + + // PollDelayMultiplier: Multiplier to gradually increase delay between + // subsequent polls until it reaches max_poll_delay. Default value: 1.5. + PollDelayMultiplier float64 `json:"pollDelayMultiplier,omitempty"` + + // TotalPollTimeout: Total polling timeout. Default value: 5 minutes. + TotalPollTimeout string `json:"totalPollTimeout,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InitialPollDelay") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InitialPollDelay") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *LongRunning) MarshalJSON() ([]byte, error) { + type NoMethod LongRunning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *LongRunning) UnmarshalJSON(data []byte) error { + type NoMethod LongRunning + var s1 struct { + PollDelayMultiplier gensupport.JSONFloat64 `json:"pollDelayMultiplier"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.PollDelayMultiplier = float64(s1.PollDelayMultiplier) + return nil +} + // Method: Method represents a method of an API interface. type Method struct { // Name: The simple name of this method. @@ -2788,6 +3132,7 @@ type Method struct { // Possible values: // "SYNTAX_PROTO2" - Syntax `proto2`. // "SYNTAX_PROTO3" - Syntax `proto3`. + // "SYNTAX_EDITIONS" - Syntax `editions`. Syntax string `json:"syntax,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to @@ -2813,6 +3158,45 @@ func (s *Method) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// MethodSettings: Describes the generator configuration for a method. +type MethodSettings struct { + // LongRunning: Describes settings to use for long-running operations + // when generating API methods for RPCs. 
Complements RPCs that use the + // annotations in google/longrunning/operations.proto. Example of a YAML + // configuration:: publishing: method_behavior: - selector: + // CreateAdDomain long_running: initial_poll_delay: seconds: 60 # 1 + // minute poll_delay_multiplier: 1.5 max_poll_delay: seconds: 360 # 6 + // minutes total_poll_timeout: seconds: 54000 # 90 minutes + LongRunning *LongRunning `json:"longRunning,omitempty"` + + // Selector: The fully qualified name of the method, for which the + // options below apply. This is used to find the method to apply the + // options. + Selector string `json:"selector,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LongRunning") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LongRunning") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MethodSettings) MarshalJSON() ([]byte, error) { + type NoMethod MethodSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // MetricDescriptor: Defines a metric type and its schema. Once a metric // descriptor is created, deleting or altering it stops data collection // and makes the metric type's existing data unusable. @@ -3390,6 +3774,34 @@ func (s *MonitoringDestination) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NodeSettings: Settings for Node client libraries. +type NodeSettings struct { + // Common: Some settings. + Common *CommonLanguageSettings `json:"common,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Common") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Common") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NodeSettings) MarshalJSON() ([]byte, error) { + type NoMethod NodeSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // OAuthRequirements: OAuth scopes are a way to define data and // permissions on data. 
For example, there are scopes defined for // "Read-only access to Google Calendar" and "Access to Cloud Platform". @@ -3613,6 +4025,141 @@ func (s *Page) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// PhpSettings: Settings for Php client libraries. +type PhpSettings struct { + // Common: Some settings. + Common *CommonLanguageSettings `json:"common,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Common") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Common") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PhpSettings) MarshalJSON() ([]byte, error) { + type NoMethod PhpSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Publishing: This message configures the settings for publishing +// Google Cloud Client libraries +// (https://cloud.google.com/apis/docs/cloud-client-libraries) generated +// from the service config. +type Publishing struct { + // ApiShortName: Used as a tracking tag when collecting data about the + // APIs developer relations artifacts like docs, packages delivered to + // package managers, etc. Example: "speech". + ApiShortName string `json:"apiShortName,omitempty"` + + // CodeownerGithubTeams: GitHub teams to be added to CODEOWNERS in the + // directory in GitHub containing source code for the client libraries + // for this API. + CodeownerGithubTeams []string `json:"codeownerGithubTeams,omitempty"` + + // DocTagPrefix: A prefix used in sample code when demarking regions to + // be included in documentation. + DocTagPrefix string `json:"docTagPrefix,omitempty"` + + // DocumentationUri: Link to product home page. Example: + // https://cloud.google.com/asset-inventory/docs/overview + DocumentationUri string `json:"documentationUri,omitempty"` + + // GithubLabel: GitHub label to apply to issues and pull requests opened + // for this API. + GithubLabel string `json:"githubLabel,omitempty"` + + // LibrarySettings: Client library settings. If the same version string + // appears multiple times in this list, then the last one wins. Settings + // from earlier settings with the same version string are discarded. + LibrarySettings []*ClientLibrarySettings `json:"librarySettings,omitempty"` + + // MethodSettings: A list of API method settings, e.g. the behavior for + // methods that use the long-running operation pattern. + MethodSettings []*MethodSettings `json:"methodSettings,omitempty"` + + // NewIssueUri: Link to a place that API users can report issues. + // Example: + // https://issuetracker.google.com/issues/new?component=190865&template=1161103 + NewIssueUri string `json:"newIssueUri,omitempty"` + + // Organization: For whom the client library is being published. 
+ // + // Possible values: + // "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED" - Not useful. + // "CLOUD" - Google Cloud Platform Org. + // "ADS" - Ads (Advertising) Org. + // "PHOTOS" - Photos Org. + // "STREET_VIEW" - Street View Org. + Organization string `json:"organization,omitempty"` + + // ProtoReferenceDocumentationUri: Optional link to proto reference + // documentation. Example: + // https://cloud.google.com/pubsub/lite/docs/reference/rpc + ProtoReferenceDocumentationUri string `json:"protoReferenceDocumentationUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ApiShortName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ApiShortName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Publishing) MarshalJSON() ([]byte, error) { + type NoMethod Publishing + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// PythonSettings: Settings for Python client libraries. +type PythonSettings struct { + // Common: Some settings. + Common *CommonLanguageSettings `json:"common,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Common") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Common") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PythonSettings) MarshalJSON() ([]byte, error) { + type NoMethod PythonSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Quota: Quota configuration helps to achieve fairness and budgeting in // service usage. The metric based quota configuration works this way: - // The service configuration defines a set of metrics. - For API calls, @@ -3621,11 +4168,11 @@ func (s *Page) MarshalJSON() ([]byte, error) { // be used for quota checks at runtime. 
An example quota configuration // in yaml format: quota: limits: - name: apiWriteQpsPerProject metric: // library.googleapis.com/write_calls unit: "1/min/{project}" # rate -// limit for consumer projects values: STANDARD: 10000 # The metric -// rules bind all methods to the read_calls metric, # except for the -// UpdateBook and DeleteBook methods. These two methods # are mapped to -// the write_calls metric, with the UpdateBook method # consuming at -// twice rate as the DeleteBook method. metric_rules: - selector: "*" +// limit for consumer projects values: STANDARD: 10000 (The metric rules +// bind all methods to the read_calls metric, except for the UpdateBook +// and DeleteBook methods. These two methods are mapped to the +// write_calls metric, with the UpdateBook method consuming at twice +// rate as the DeleteBook method.) metric_rules: - selector: "*" // metric_costs: library.googleapis.com/read_calls: 1 - selector: // google.example.library.v1.LibraryService.UpdateBook metric_costs: // library.googleapis.com/write_calls: 2 - selector: @@ -3636,10 +4183,10 @@ func (s *Page) MarshalJSON() ([]byte, error) { // name: library.googleapis.com/write_calls display_name: Write requests // metric_kind: DELTA value_type: INT64 type Quota struct { - // Limits: List of `QuotaLimit` definitions for the service. + // Limits: List of QuotaLimit definitions for the service. Limits []*QuotaLimit `json:"limits,omitempty"` - // MetricRules: List of `MetricRule` definitions, each one mapping a + // MetricRules: List of MetricRule definitions, each one mapping a // selected method to one or more metrics. MetricRules []*MetricRule `json:"metricRules,omitempty"` @@ -3837,6 +4384,34 @@ func (s *QuotaOverride) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RubySettings: Settings for Ruby client libraries. +type RubySettings struct { + // Common: Some settings. + Common *CommonLanguageSettings `json:"common,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Common") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Common") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RubySettings) MarshalJSON() ([]byte, error) { + type NoMethod RubySettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ServiceIdentity: Service identity for a service. This is the identity // that service producer should use to access consumer resources. type ServiceIdentity struct { @@ -4096,6 +4671,10 @@ func (s *SystemParameters) MarshalJSON() ([]byte, error) { // Type: A protocol buffer message type. type Type struct { + // Edition: The source edition string, only valid when syntax is + // SYNTAX_EDITIONS. 
+ Edition string `json:"edition,omitempty"` + // Fields: The list of fields. Fields []*Field `json:"fields,omitempty"` @@ -4117,9 +4696,10 @@ type Type struct { // Possible values: // "SYNTAX_PROTO2" - Syntax `proto2`. // "SYNTAX_PROTO3" - Syntax `proto3`. + // "SYNTAX_EDITIONS" - Syntax `editions`. Syntax string `json:"syntax,omitempty"` - // ForceSendFields is a list of field names (e.g. "Fields") to + // ForceSendFields is a list of field names (e.g. "Edition") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -4127,8 +4707,8 @@ type Type struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Fields") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Edition") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -4352,17 +4932,17 @@ func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -4491,17 +5071,17 @@ func (c *OperationsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -4640,17 +5220,17 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -4704,14 +5284,7 @@ type OperationsListCall struct { // List: Lists operations that match the specified filter in the // request. If the server doesn't support this method, it returns -// `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to -// override the binding to use different resource name schemes, such as -// `users/*/operations`. To override the binding, API services can add a -// binding such as "/v1/{name=users/*}/operations" to their service -// configuration. 
For backwards compatibility, the default name includes -// the operations collection id, however overriding users must ensure -// the name binding is the parent resource, without the operations -// collection id. +// `UNIMPLEMENTED`. func (r *OperationsService) List() *OperationsListCall { c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c @@ -4817,17 +5390,17 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsRe if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListOperationsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -4841,7 +5414,7 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsRe } return ret, nil // { - // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `\"/v1/{name=users/*}/operations\"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.", + // "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", // "flatPath": "v1/operations", // "httpMethod": "GET", // "id": "serviceusage.operations.list", @@ -4918,9 +5491,9 @@ type ServicesBatchEnableCall struct { // and no state changes occur. To enable a single service, use the // `EnableService` method instead. // -// - parent: Parent to enable services on. An example name would be: -// `projects/123` where `123` is the project number. The -// `BatchEnableServices` method currently only supports projects. +// - parent: Parent to enable services on. An example name would be: +// `projects/123` where `123` is the project number. The +// `BatchEnableServices` method currently only supports projects. func (r *ServicesService) BatchEnable(parent string, batchenableservicesrequest *BatchEnableServicesRequest) *ServicesBatchEnableCall { c := &ServicesBatchEnableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -4995,17 +5568,17 @@ func (c *ServicesBatchEnableCall) Do(opts ...googleapi.CallOption) (*Operation, if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -5064,11 +5637,11 @@ type ServicesBatchGetCall struct { // BatchGet: Returns the service configurations and enabled states for a // given list of services. // -// - parent: Parent to retrieve services from. If this is set, the -// parent of all of the services specified in `names` must match this -// field. 
An example name would be: `projects/123` where `123` is the -// project number. The `BatchGetServices` method currently only -// supports projects. +// - parent: Parent to retrieve services from. If this is set, the +// parent of all of the services specified in `names` must match this +// field. An example name would be: `projects/123` where `123` is the +// project number. The `BatchGetServices` method currently only +// supports projects. func (r *ServicesService) BatchGet(parent string) *ServicesBatchGetCall { c := &ServicesBatchGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -5160,17 +5733,17 @@ func (c *ServicesBatchGetCall) Do(opts ...googleapi.CallOption) (*BatchGetServic if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &BatchGetServicesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -5236,11 +5809,11 @@ type ServicesDisableCall struct { // will receive a `FAILED_PRECONDITION` status if the target service is // not currently enabled. // -// - name: Name of the consumer and service to disable the service on. -// The enable and disable methods currently only support projects. An -// example name would be: -// `projects/123/services/serviceusage.googleapis.com` where `123` is -// the project number. +// - name: Name of the consumer and service to disable the service on. +// The enable and disable methods currently only support projects. An +// example name would be: +// `projects/123/services/serviceusage.googleapis.com` where `123` is +// the project number. func (r *ServicesService) Disable(name string, disableservicerequest *DisableServiceRequest) *ServicesDisableCall { c := &ServicesDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5315,17 +5888,17 @@ func (c *ServicesDisableCall) Do(opts ...googleapi.CallOption) (*Operation, erro if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -5383,12 +5956,12 @@ type ServicesEnableCall struct { // Enable: Enable a service so that it can be used with a project. // -// - name: Name of the consumer and service to enable the service on. -// The `EnableService` and `DisableService` methods currently only -// support projects. Enabling a service requires that the service is -// public or is shared with the user enabling the service. An example -// name would be: `projects/123/services/serviceusage.googleapis.com` -// where `123` is the project number. +// - name: Name of the consumer and service to enable the service on. +// The `EnableService` and `DisableService` methods currently only +// support projects. Enabling a service requires that the service is +// public or is shared with the user enabling the service. An example +// name would be: `projects/123/services/serviceusage.googleapis.com` +// where `123` is the project number. 
func (r *ServicesService) Enable(name string, enableservicerequest *EnableServiceRequest) *ServicesEnableCall { c := &ServicesEnableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5463,17 +6036,17 @@ func (c *ServicesEnableCall) Do(opts ...googleapi.CallOption) (*Operation, error if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Operation{ ServerResponse: googleapi.ServerResponse{ @@ -5532,10 +6105,10 @@ type ServicesGetCall struct { // Get: Returns the service configuration and enabled state for a given // service. // -// - name: Name of the consumer and service to get the `ConsumerState` -// for. An example name would be: -// `projects/123/services/serviceusage.googleapis.com` where `123` is -// the project number. +// - name: Name of the consumer and service to get the `ConsumerState` +// for. An example name would be: +// `projects/123/services/serviceusage.googleapis.com` where `123` is +// the project number. func (r *ServicesService) Get(name string) *ServicesGetCall { c := &ServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -5617,17 +6190,17 @@ func (c *ServicesGetCall) Do(opts ...googleapi.CallOption) (*GoogleApiServiceusa if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &GoogleApiServiceusageV1Service{ ServerResponse: googleapi.ServerResponse{ @@ -5692,8 +6265,8 @@ type ServicesListCall struct { // (https://cloud.google.com/asset-inventory/docs/apis), which provides // higher throughput and richer filtering capability. // -// - parent: Parent to search for services on. An example name would be: -// `projects/123` where `123` is the project number. +// - parent: Parent to search for services on. An example name would be: +// `projects/123` where `123` is the project number. func (r *ServicesService) List(parent string) *ServicesListCall { c := &ServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -5799,17 +6372,17 @@ func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesRespon if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListServicesResponse{ ServerResponse: googleapi.ServerResponse{ diff --git a/vendor/google.golang.org/api/transport/http/configure_http2_go116.go b/vendor/google.golang.org/api/transport/http/configure_http2_go116.go deleted file mode 100644 index 305a6929c..000000000 --- a/vendor/google.golang.org/api/transport/http/configure_http2_go116.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2021 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.16 -// +build go1.16 - -package http - -import ( - "net/http" - "time" - - "golang.org/x/net/http2" -) - -// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the -// transport. This allows broken idle connections to be pruned more quickly, -// preventing the client from attempting to re-use connections that will no -// longer work. -func configureHTTP2(trans *http.Transport) { - http2Trans, err := http2.ConfigureTransports(trans) - if err == nil { - http2Trans.ReadIdleTimeout = time.Second * 31 - } -} diff --git a/vendor/google.golang.org/api/transport/http/configure_http2_not_go116.go b/vendor/google.golang.org/api/transport/http/configure_http2_not_go116.go deleted file mode 100644 index d2742d283..000000000 --- a/vendor/google.golang.org/api/transport/http/configure_http2_not_go116.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.16 -// +build !go1.16 - -package http - -import ( - "net/http" -) - -// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the -// transport. The interface to do this is only available in Go 1.16 and up, so -// this performs a no-op. -func configureHTTP2(trans *http.Transport) {} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index cab709f0c..403509d08 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -16,13 +16,13 @@ import ( "time" "go.opencensus.io/plugin/ochttp" + "golang.org/x/net/http2" "golang.org/x/oauth2" "google.golang.org/api/googleapi/transport" "google.golang.org/api/internal" + "google.golang.org/api/internal/cert" "google.golang.org/api/option" - "google.golang.org/api/transport/cert" "google.golang.org/api/transport/http/internal/propagation" - "google.golang.org/api/transport/internal/dca" ) // NewClient returns an HTTP client for use communicating with a Google cloud @@ -33,7 +33,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if err != nil { return nil, "", err } - clientCertSource, endpoint, err := dca.GetClientCertificateSourceAndEndpoint(settings) + clientCertSource, endpoint, err := internal.GetClientCertificateSourceAndEndpoint(settings) if err != nil { return nil, "", err } @@ -65,7 +65,6 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna paramTransport := ¶meterTransport{ base: base, userAgent: settings.UserAgent, - quotaProject: settings.QuotaProject, requestReason: settings.RequestReason, } var trans http.RoundTripper = paramTransport @@ -74,6 +73,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna case settings.NoAuth: // Do nothing. 
case settings.APIKey != "": + paramTransport.quotaProject = internal.GetQuotaProject(nil, settings.QuotaProject) trans = &transport.APIKey{ Transport: trans, Key: settings.APIKey, @@ -83,10 +83,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna if err != nil { return nil, err } - if paramTransport.quotaProject == "" { - paramTransport.quotaProject = internal.QuotaProjectFromCreds(creds) - } - + paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject) ts := creds.TokenSource if settings.ImpersonationConfig == nil && settings.TokenSource != nil { ts = settings.TokenSource @@ -175,13 +172,22 @@ func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) htt } } - // If possible, configure http2 transport in order to use ReadIdleTimeout - // setting. This can only be done in Go 1.16 and up. configureHTTP2(trans) return trans } +// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the +// transport. This allows broken idle connections to be pruned more quickly, +// preventing the client from attempting to re-use connections that will no +// longer work. +func configureHTTP2(trans *http.Transport) { + http2Trans, err := http2.ConfigureTransports(trans) + if err == nil { + http2Trans.ReadIdleTimeout = time.Second * 31 + } +} + // fallbackBaseTransport is used in " or "project: google.protobuf.Duration - 10, // 1: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation - 11, // 2: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link @@ -1089,7 +1125,7 @@ func file_google_rpc_error_details_proto_init() { } if !protoimpl.UnsafeEnabled { file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RetryInfo); i { + switch v := v.(*ErrorInfo); i { case 0: return &v.state case 1: @@ -1101,7 +1137,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DebugInfo); i { + switch v := v.(*RetryInfo); i { case 0: return &v.state case 1: @@ -1113,7 +1149,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QuotaFailure); i { + switch v := v.(*DebugInfo); i { case 0: return &v.state case 1: @@ -1125,7 +1161,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorInfo); i { + switch v := v.(*QuotaFailure); i { case 0: return &v.state case 1: @@ -1208,7 +1244,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) 
interface{} { switch v := v.(*QuotaFailure_Violation); i { case 0: return &v.state diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index f34a38e4e..a6b508188 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/status.proto package status @@ -48,11 +48,13 @@ type Status struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index cd03f8c76..608aa6e1a 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -20,6 +20,15 @@ How to get your contributions merged smoothly and quickly. both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. +- If you are searching for features to work on, issues labeled [Status: Help + Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) + is a great place to start. These issues are well-documented and usually can be + resolved with a single pull request. + +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file + and update the year. + - The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a @@ -32,14 +41,18 @@ How to get your contributions merged smoothly and quickly. - Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. -- Don't fix code style and formatting unless you are already changing that line - to address an issue. PRs with irrelevant changes won't be merged. If you do - want to fix formatting or style, do that in a separate PR. +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. 
If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the + benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We expect you to be reasonably - responsive to those comments, otherwise the PR will be closed after 2-3 weeks - of inactivity. + that you'll need to address before merging. We'll mark it as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale` and automatically close 7 days after that if we don't hear back from + you. - Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use @@ -53,9 +66,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 093c82b3a..c6672c0a3 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -8,17 +8,18 @@ See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIB for general contribution guidelines. ## Maintainers (in alphabetical order) -- [canguler](https://github.com/canguler), Google LLC + - [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC - [menghanl](https://github.com/menghanl), Google LLC - [srini100](https://github.com/srini100), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez), Google LLC +- [canguler](https://github.com/canguler), Google LLC - [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC - [jtattermusch](https://github.com/jtattermusch), Google LLC - [lyuxuan](https://github.com/lyuxuan), Google LLC - [makmukhi](https://github.com/makmukhi), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 1f0722f16..1f8960922 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -41,8 +41,6 @@ vetdeps: clean \ proto \ test \ - testappengine \ - testappenginedeps \ testrace \ vet \ vetdeps diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt new file mode 100644 index 000000000..530197749 --- /dev/null +++ b/vendor/google.golang.org/grpc/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 3220d87be..3efca4591 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -19,61 +19,112 @@ // Package attributes defines a generic key/value store used in various gRPC // components. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. package attributes -import "fmt" +import ( + "fmt" + "strings" +) // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own -// types for keys. +// types for keys. Values should not be modified after they are added to an +// Attributes or if they were received from one. If values implement 'Equal(o +// interface{}) bool', it will be called by (*Attributes).Equal to determine +// whether two values with the same key should be considered equal. type Attributes struct { m map[interface{}]interface{} } -// New returns a new Attributes containing all key/value pairs in kvs. If the -// same key appears multiple times, the last value overwrites all previous -// values for that key. Panics if len(kvs) is not even. -func New(kvs ...interface{}) *Attributes { - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) - } - a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} - for i := 0; i < len(kvs)/2; i++ { - a.m[kvs[i*2]] = kvs[i*2+1] - } - return a +// New returns a new Attributes containing the key/value pair. +func New(key, value interface{}) *Attributes { + return &Attributes{m: map[interface{}]interface{}{key: value}} } -// WithValues returns a new Attributes containing all key/value pairs in a and -// kvs. Panics if len(kvs) is not even. If the same key appears multiple -// times, the last value overwrites all previous values for that key. To -// remove an existing key, use a nil value. -func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { +// WithValue returns a new Attributes containing the previous keys and values +// and the new key/value pair. If the same key appears multiple times, the +// last value overwrites all previous values for that key. To remove an +// existing key, use a nil value. value should not be modified later. +func (a *Attributes) WithValue(key, value interface{}) *Attributes { if a == nil { - return New(kvs...) + return New(key, value) } - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) - } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } - for i := 0; i < len(kvs)/2; i++ { - n.m[kvs[i*2]] = kvs[i*2+1] - } + n.m[key] = value return n } // Value returns the value associated with these attributes for key, or nil if -// no value is associated with key. 
+// no value is associated with key. The returned value should not be modified. func (a *Attributes) Value(key interface{}) interface{} { if a == nil { return nil } return a.m[key] } + +// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) +// bool' is implemented for a value in the attributes, it is called to +// determine if the value matches the one stored in the other attributes. If +// Equal is not implemented, standard equality is used to determine if the two +// values are equal. Note that some types (e.g. maps) aren't comparable by +// default, so they must be wrapped in a struct, or in an alias type, with Equal +// defined. +func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true + } + if a == nil || o == nil { + return false + } + if len(a.m) != len(o.m) { + return false + } + for k, v := range a.m { + ov, ok := o.m[k] + if !ok { + // o missing element of a + return false + } + if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if !eq.Equal(ov) { + return false + } + } else if v != ov { + // Fallback to a standard equality check if Value is unimplemented. + return false + } + } + return true +} + +// String prints the attribute map. If any key or values throughout the map +// implement fmt.Stringer, it calls that method and appends. +func (a *Attributes) String() string { + var sb strings.Builder + sb.WriteString("{") + first := true + for k, v := range a.m { + var key, val string + if str, ok := k.(interface{ String() string }); ok { + key = str.String() + } + if str, ok := v.(interface{ String() string }); ok { + val = str.String() + } + if !first { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%q: %q, ", key, val)) + first = false + } + sb.WriteString("}") + return sb.String() +} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index 542594f5c..29475e31c 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -48,7 +48,7 @@ type BackoffConfig struct { // here for more details: // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index ab531f4c0..8f00523c0 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -27,6 +27,7 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" @@ -75,24 +76,26 @@ func Get(name string) Builder { return nil } -// SubConn represents a gRPC sub connection. -// Each sub connection contains a list of addresses. gRPC will -// try to connect to them (in sequence), and stop trying the -// remainder once one connection is successful. +// A SubConn represents a single connection to a gRPC backend service. // -// The reconnect backoff will be applied on the list, not a single address. -// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// Each SubConn contains a list of addresses. // -// All SubConns start in IDLE, and will not try to connect. To trigger -// the connecting, Balancers must call Connect. -// When the connection encounters an error, it will reconnect immediately. 
-// When the connection becomes IDLE, it will not reconnect unless Connect is -// called. +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. // -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For the situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. @@ -107,6 +110,11 @@ type SubConn interface { UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) } // NewSubConnOptions contains options to create new SubConn. @@ -172,25 +180,32 @@ type ClientConn interface { // BuildOptions contains additional information for Build. type BuildOptions struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. + // DialCreds is the transport credentials to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. DialCreds credentials.TransportCredentials - // CredsBundle is the credentials bundle that the Balancer can use. + // CredsBundle is the credentials bundle to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. CredsBundle credentials.Bundle - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. + // Dialer is the custom dialer to use when communicating with a remote load + // balancer server. Balancer implementations which do not communicate with a + // remote load balancer server can ignore this field. Dialer func(context.Context, string) (net.Conn, error) - // ChannelzParentID is the entity parent's channelz unique identification number. 
- ChannelzParentID int64 + // Authority is the server name to use as part of the authentication + // handshake when communicating with a remote load balancer server. Balancer + // implementations which do not communicate with a remote load balancer + // server can ignore this field. + Authority string + // ChannelzParentID is the parent ClientConn's channelz ID. + ChannelzParentID *channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. CustomUserAgent string - // Target contains the parsed address info of the dial target. It is the same resolver.Target as - // passed to the resolver. - // See the documentation for the resolver.Target type for details about what it contains. + // Target contains the parsed address info of the dial target. It is the + // same resolver.Target as passed to the resolver. See the documentation for + // the resolver.Target type for details about what it contains. Target resolver.Target } @@ -234,7 +249,7 @@ type DoneInfo struct { // ServerLoad is the load received from server. It's usually sent as part of // trailing metadata. // - // The only supported type now is *orca_v1.LoadReport. + // The only supported type now is *orca_v3.LoadReport. ServerLoad interface{} } @@ -264,6 +279,14 @@ type PickResult struct { // type, Done may not be called. May be nil if the balancer does not wish // to be notified when the RPC completes. Done func(DoneInfo) + + // Metadata provides a way for LB policies to inject arbitrary per-call + // metadata. Any metadata returned here will be merged with existing + // metadata added by the client application. + // + // LB policies with child policies are responsible for propagating metadata + // injected by their children to the ClientConn, as part of Pick(). + Metadata metadata.MD } // TransientFailureError returns e. It exists for backward compatibility and @@ -326,6 +349,20 @@ type Balancer interface { Close() } +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. +// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + // SubConnState describes the state of a SubConn. type SubConnState struct { // ConnectivityState is the connectivity state of the SubConn. @@ -348,41 +385,20 @@ type ClientConnState struct { // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") -// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns -// and returns one aggregated connectivity state. -// -// It's not thread safe. -type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. 
The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as interface{} to avoid a + // dependency cycle. Should also return a close function that will be + // called when all references to the Producer have been given up. + Build(grpcClientConnInterface interface{}) (p Producer, close func()) } -// RecordTransition records state change happening in subConn and based on that -// it evaluates what aggregated state should be. -// -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else the aggregated state is TransientFailure. -// -// Idle and Shutdown are not considered. -func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - } - } - - // Evaluate. - if cse.numReady > 0 { - return connectivity.Ready - } - if cse.numConnecting > 0 { - return connectivity.Connecting - } - return connectivity.TransientFailure +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer interface { } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index c883efa0b..3929c26d3 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" @@ -42,10 +41,11 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: make(map[resolver.Address]subConnInfo), + subConns: resolver.NewAddressMap(), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, + state: connectivity.Connecting, } // Initialize picker to a picker that always returns // ErrNoSubConnAvailable, because when state of a SubConn changes, we @@ -58,11 +58,6 @@ func (bb *baseBuilder) Name() string { return bb.name } -type subConnInfo struct { - subConn balancer.SubConn - attrs *attributes.Attributes -} - type baseBalancer struct { cc balancer.ClientConn pickerBuilder PickerBuilder @@ -70,7 +65,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns map[resolver.Address]subConnInfo // `attributes` is stripped from the keys of this map (the addresses) + subConns *resolver.AddressMap scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -81,7 +76,7 @@ type baseBalancer struct { func (b *baseBalancer) ResolverError(err error) { b.resolverErr = err - if len(b.subConns) == 0 { + if b.subConns.Len() == 0 { b.state = connectivity.TransientFailure } @@ -105,52 +100,29 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error 
{ // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. - addrsSet := make(map[resolver.Address]struct{}) + addrsSet := resolver.NewAddressMap() for _, a := range s.ResolverState.Addresses { - // Strip attributes from addresses before using them as map keys. So - // that when two addresses only differ in attributes pointers (but with - // the same attribute content), they are considered the same address. - // - // Note that this doesn't handle the case where the attribute content is - // different. So if users want to set different attributes to create - // duplicate connections to the same backend, it doesn't work. This is - // fine for now, because duplicate is done by setting Metadata today. - // - // TODO: read attributes to handle duplicate connections. - aNoAttrs := a - aNoAttrs.Attributes = nil - addrsSet[aNoAttrs] = struct{}{} - if scInfo, ok := b.subConns[aNoAttrs]; !ok { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - // - // When creating SubConn, the original address with attributes is - // passed through. So that connection configurations in attributes - // (like creds) will be used. sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes} + b.subConns.Set(a, sc) b.scStates[sc] = connectivity.Idle + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) sc.Connect() - } else { - // Always update the subconn's address in case the attributes - // changed. - // - // The SubConn does a reflect.DeepEqual of the new and old - // addresses. So this is a noop if the current address is the same - // as the old one (including attributes). - scInfo.attrs = a.Attributes - b.subConns[aNoAttrs] = scInfo - b.cc.UpdateAddresses(scInfo.subConn, []resolver.Address{a}) } } - for a, scInfo := range b.subConns { + for _, a := range b.subConns.Keys() { + sci, _ := b.subConns.Get(a) + sc := sci.(balancer.SubConn) // a was removed by resolver. - if _, ok := addrsSet[a]; !ok { - b.cc.RemoveSubConn(scInfo.subConn) - delete(b.subConns, a) + if _, ok := addrsSet.Get(a); !ok { + b.cc.RemoveSubConn(sc) + b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. } @@ -163,6 +135,9 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) return nil } @@ -182,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error { // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is -// - errPicker if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. 
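The reworked UpdateClientConnState above uses resolver.AddressMap as a set keyed on the full address, replacing the old map that stripped attributes. A small sketch of that set usage, assuming the AddressMap API that ships with this gRPC version; the addresses are illustrative:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	// Addresses from the latest resolver update, stored as a set.
	update := []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}
	addrsSet := resolver.NewAddressMap()
	for _, a := range update {
		addrsSet.Set(a, nil)
	}

	// An address held from a previous update that is absent from the new set
	// is one whose SubConn the balancer would remove.
	old := resolver.Address{Addr: "10.0.0.3:443"}
	if _, ok := addrsSet.Get(old); !ok {
		fmt.Printf("%s no longer resolved; remove its SubConn\n", old.Addr)
	}
	fmt.Println("addresses in current update:", addrsSet.Len())
}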
func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { b.picker = NewErrPicker(b.mergeErrors()) @@ -192,10 +167,11 @@ func (b *baseBalancer) regeneratePicker() { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. - for addr, scInfo := range b.subConns { - if st, ok := b.scStates[scInfo.subConn]; ok && st == connectivity.Ready { - addr.Attributes = scInfo.attrs - readySCs[scInfo.subConn] = SubConnInfo{Address: addr} + for _, addr := range b.subConns.Keys() { + sci, _ := b.subConns.Get(addr) + sc := sci.(balancer.SubConn) + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} } } b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) @@ -213,10 +189,14 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } return } - if oldS == connectivity.TransientFailure && s == connectivity.Connecting { - // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or // CONNECTING transitions to prevent the aggregated state from being // always CONNECTING when many backends exist but are all down. + if s == connectivity.Idle { + sc.Connect() + } return } b.scStates[sc] = s @@ -242,7 +222,6 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su b.state == connectivity.TransientFailure { b.regeneratePicker() } - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } @@ -251,6 +230,11 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su func (b *baseBalancer) Close() { } +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + // NewErrPicker returns a Picker that always returns err on Pick(). func NewErrPicker(err error) balancer.Picker { return &errPicker{err: err} diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go new file mode 100644 index 000000000..c33413581 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import "google.golang.org/grpc/connectivity" + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. 
+ numIdle uint64 // Number of addrConns in idle state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// +// Shutdown is not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal + } + } + return cse.CurrentState() +} + +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go index a24264a34..4ecfa1c21 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -39,7 +39,7 @@ type State struct { // Set returns a copy of the provided state with attributes containing s. s's // data should not be mutated after calling Set. func Set(state resolver.State, s *State) resolver.State { - state.Attributes = state.Attributes.WithValues(key, s) + state.Attributes = state.Attributes.WithValue(key, s) return state } diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 43c2a1537..f7031ad22 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" @@ -47,11 +47,11 @@ func init() { type rrPickerBuilder struct{} func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("roundrobinPicker: newPicker called with info: %v", info) + logger.Infof("roundrobinPicker: Build called with info: %v", info) if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } - var scs []balancer.SubConn + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) for sc := range info.ReadySCs { scs = append(scs, sc) } @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. 
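The aggregation rule documented above (Ready beats Connecting, which beats Idle, which beats TransientFailure) can be exercised directly, since ConnectivityStateEvaluator is exported from the balancer package in this version. A small sketch; the transitions mirror how the base balancer records a new SubConn as Shutdown -> Idle:

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cse := &balancer.ConnectivityStateEvaluator{}

	// Two new SubConns are recorded as Shutdown -> Idle.
	cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)
	cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)
	fmt.Println(cse.CurrentState()) // IDLE

	// One of them starts connecting: Connecting outranks Idle.
	cse.RecordTransition(connectivity.Idle, connectivity.Connecting)
	fmt.Println(cse.CurrentState()) // CONNECTING

	// And becomes ready: Ready outranks everything else.
	cse.RecordTransition(connectivity.Connecting, connectivity.Ready)
	fmt.Println(cse.CurrentState()) // READY
}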
- next: grpcrand.Intn(len(scs)), + next: uint32(grpcrand.Intn(len(scs))), } } @@ -69,15 +69,13 @@ type rrPicker struct { // created. The slice is immutable. Each Get() will do a round robin // selection from it and return the selected SubConn. subConns []balancer.SubConn - - mu sync.Mutex - next int + next uint32 } func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - p.mu.Lock() - sc := p.subConns[p.next] - p.next = (p.next + 1) % len(p.subConns) - p.mu.Unlock() + subConnsLen := uint32(len(p.subConns)) + nextIndex := atomic.AddUint32(&p.next, 1) + + sc := p.subConns[nextIndex%subConnsLen] return balancer.PickResult{SubConn: sc}, nil } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index dd8397963..04b9ad411 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -19,164 +19,331 @@ package grpc import ( + "context" "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} +type ccbMode int + +const ( + ccbModeActive = iota + ccbModeIdle + ccbModeClosed + ccbModeExitingIdle +) -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - updateCh *buffer.Unbounded - closed *grpcsync.Event - done *grpcsync.Event - - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + opts balancer.BuildOptions + + // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled in the serializer. Fields + // accessed *only* in these serializer callbacks, can therefore be accessed + // without a mutex. + balancer *gracefulswitch.Balancer + curBalancerName string + + // mu guards access to the below fields. Access to the serializer and its + // cancel function needs to be mutex protected because they are overwritten + // when the wrapper exits idle mode. + mu sync.Mutex + serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. 
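The round-robin picker above drops its mutex in favor of a single atomic counter: each Pick advances the counter and takes it modulo the length of the immutable SubConn slice. A standalone sketch of the same idea, with plain strings standing in for SubConns:

package main

import (
	"fmt"
	"sync/atomic"
)

// pick advances the shared counter atomically and maps it onto the fixed
// backend slice, the same lock-free scheme used by rrPicker.Pick above.
func pick(next *uint32, backends []string) string {
	n := atomic.AddUint32(next, 1)
	return backends[n%uint32(len(backends))]
}

func main() {
	var next uint32
	backends := []string{"a:443", "b:443", "c:443"}
	for i := 0; i < 6; i++ {
		fmt.Println(pick(&next, backends))
	}
}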
+ serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. + mode ccbMode // Tracks the current mode of the wrapper. } -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(context.Background()) ccb := &ccBalancerWrapper{ - cc: cc, - updateCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - subConns: make(map[*acBalancerWrapper]struct{}), + cc: cc, + opts: bopts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, } - go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. -func (ccb *ccBalancerWrapper) watcher() { - for { - select { - case t := <-ccb.updateCh.Get(): - ccb.updateCh.Load() - if ccb.closed.HasFired() { - break - } - switch u := t.(type) { - case *scStateUpdate: - ccb.balancerMu.Lock() - ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) - ccb.balancerMu.Unlock() - case *acBalancerWrapper: - ccb.mu.Lock() - if ccb.subConns != nil { - delete(ccb.subConns, u) - ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.mu.Lock() + errCh := make(chan error, 1) + // Here and everywhere else where Schedule() is called, it is done with the + // lock held. But the lock guards only the scheduling part. The actual + // callback is called asynchronously without the lock being held. + ok := ccb.serializer.Schedule(func(_ context.Context) { + // If the addresses specified in the update contain addresses of type + // "grpclb" and the selected LB policy is not "grpclb", these addresses + // will be filtered out and ccs will be modified with the updated + // address list. + if ccb.curBalancerName != grpclbName { + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue } - ccb.mu.Unlock() - default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) + addrs = append(addrs, addr) } - case <-ccb.closed.Done(): + ccs.ResolverState.Addresses = addrs } + errCh <- ccb.balancer.UpdateClientConnState(*ccs) + }) + if !ok { + // If we are unable to schedule a function with the serializer, it + // indicates that it has been closed. A serializer is only closed when + // the wrapper is closed or is in idle. + ccb.mu.Unlock() + return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") + } + ccb.mu.Unlock() - if ccb.closed.HasFired() { - ccb.balancerMu.Lock() - ccb.balancer.Close() - ccb.balancerMu.Unlock() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) - ccb.done.Fire() - // Fire done before removing the addr conns. We can safely unblock - // ccb.close and allow the removeAddrConns to happen - // asynchronously. 
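The CallbackSerializer referenced above lives in an internal package, so it cannot be imported by user code; the toy stand-in below only illustrates the property it gives ccBalancerWrapper: callbacks may be scheduled from any goroutine, but they run one at a time and in order, so calls into the balancer need no extra locking.

package main

import "fmt"

// serializer is a toy stand-in for the internal CallbackSerializer: a single
// goroutine drains the queue, so scheduled callbacks never run concurrently.
type serializer struct{ ch chan func() }

func newSerializer() *serializer {
	s := &serializer{ch: make(chan func(), 16)}
	go func() {
		for f := range s.ch {
			f()
		}
	}()
	return s
}

// Schedule enqueues a callback; unlike the real type, this toy version blocks
// when the buffer is full and has no close/drain handling.
func (s *serializer) Schedule(f func()) { s.ch <- f }

func main() {
	s := newSerializer()
	done := make(chan struct{})
	s.Schedule(func() { fmt.Println("resolver update") })
	s.Schedule(func() { fmt.Println("subconn state change") })
	s.Schedule(func() { close(done) })
	<-done
}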
- for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } + // We get here only if the above call to Schedule succeeds, in which case it + // is guaranteed that the scheduled function will run. Therefore it is safe + // to block on this channel. + err := <-errCh + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + return err +} + +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) + ccb.mu.Unlock() +} + +func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.ResolverError(err) + }) + ccb.mu.Unlock() +} + +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + // TODO: Other languages use case-sensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { return } + ccb.buildLoadBalancingPolicy(name) + }) + ccb.mu.Unlock() +} + +// buildLoadBalancingPolicy performs the following: +// - retrieve a balancer builder for the given name. Use the default LB +// policy, pick_first, if no LB policy with name is found in the registry. +// - instruct the gracefulswitch balancer to switch to the above builder. This +// will actually build the new balancer. +// - update the `curBalancerName` field +// +// Must be called from a serializer callback. +func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() } func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.closeBalancer(ccbModeClosed) } -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - // When updating addresses for a SubConn, if the address in use is not in - // the new addresses, the old ac will be tearDown() and a new ac will be - // created. 
tearDown() generates a state change with Shutdown state, we - // don't want the balancer to receive this state change. So before - // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and - // this function will be called with (nil, Shutdown). We don't need to call - // balancer method in this case. - if sc == nil { +// enterIdleMode is invoked by grpc when the channel enters idle mode upon +// expiry of idle_timeout. This call blocks until the balancer is closed. +func (ccb *ccBalancerWrapper) enterIdleMode() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") + ccb.closeBalancer(ccbModeIdle) +} + +// closeBalancer is invoked when the channel is being closed or when it enters +// idle mode upon expiry of idle_timeout. +func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { + ccb.mu.Unlock() return } - ccb.updateCh.Put(&scStateUpdate{ - sc: sc, - state: s, - err: err, + + ccb.mode = m + done := ccb.serializer.Done + b := ccb.balancer + ok := ccb.serializer.Schedule(func(_ context.Context) { + // Close the serializer to ensure that no more calls from gRPC are sent + // to the balancer. + ccb.serializerCancel() + // Empty the current balancer name because we don't have a balancer + // anymore and also so that we act on the next call to switchTo by + // creating a new balancer specified by the new resolver. + ccb.curBalancerName = "" }) + if !ok { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-done + // Spawn a goroutine to close the balancer (since it may block trying to + // cleanup all allocated resources) and return early. + go b.Close() } -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - return ccb.balancer.UpdateClientConnState(*ccs) +// exitIdleMode is invoked by grpc when the channel exits idle mode either +// because of an RPC or because of an invocation of the Connect() API. This +// recreates the balancer that was closed previously when entering idle mode. +// +// If the channel is not in idle mode, we know for a fact that we are here as a +// result of the user calling the Connect() method on the ClientConn. In this +// case, we can simply forward the call to the underlying balancer, instructing +// it to reconnect to the backends. +func (ccb *ccBalancerWrapper) exitIdleMode() { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed { + // Request to exit idle is a no-op when wrapper is already closed. + ccb.mu.Unlock() + return + } + + if ccb.mode == ccbModeIdle { + // Recreate the serializer which was closed when we entered idle. + ctx, cancel := context.WithCancel(context.Background()) + ccb.serializer = grpcsync.NewCallbackSerializer(ctx) + ccb.serializerCancel = cancel + } + + // The ClientConn guarantees that mutual exclusion between close() and + // exitIdleMode(), and since we just created a new serializer, we can be + // sure that the below function will be scheduled. + done := make(chan struct{}) + ccb.serializer.Schedule(func(_ context.Context) { + defer close(done) + + ccb.mu.Lock() + defer ccb.mu.Unlock() + + if ccb.mode != ccbModeIdle { + ccb.balancer.ExitIdle() + return + } + + // Gracefulswitch balancer does not support a switchTo operation after + // being closed. Hence we need to create a new one here. 
+ ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + ccb.mode = ccbModeActive + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") + + }) + ccb.mu.Unlock() + + <-done } -func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.balancerMu.Lock() - ccb.balancer.ResolverError(err) - ccb.balancerMu.Unlock() +func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { + ccb.mu.Lock() + defer ccb.mu.Unlock() + return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if len(addrs) <= 0 { - return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + if ccb.isIdleOrClosed() { + return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") + + if len(addrs) == 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac} - acbw.ac.mu.Lock() + acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} ac.acbw = acbw - acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock - // during switchBalancer() if the old balancer calls RemoveSubConn() in its - // Close(). - ccb.updateCh.Put(sc) + if ccb.isIdleOrClosed() { + // It it safe to ignore this call when the balancer is closed or in idle + // because the ClientConn takes care of closing the connections. + // + // Not returning early from here when the balancer is closed or in idle + // leads to a deadlock though, because of the following sequence of + // calls when holding cc.mu: + // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> + // ccb.RemoveAddrConn --> cc.removeAddrConn + return + } + + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + if ccb.isIdleOrClosed() { + return + } + acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -185,11 +352,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { + if ccb.isIdleOrClosed() { return } + // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is @@ -200,6 +366,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + if ccb.isIdleOrClosed() { + return + } + ccb.cc.resolveNow(o) } @@ -210,58 +380,80 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. 
type acBalancerWrapper struct { - mu sync.Mutex - ac *addrConn + ac *addrConn // read-only + + mu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer +} + +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - if len(addrs) <= 0 { - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - return + acbw.ac.updateAddrs(addrs) +} + +func (acbw *acBalancerWrapper) Connect() { + go acbw.ac.connect() +} + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, blocks until it is or ctx expires. Returns an error when the context +// expires or the addrConn is shut down. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err } - if !acbw.ac.tryUpdateAddrs(addrs) { - cc := acbw.ac.cc - opts := acbw.ac.scopts - acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the Shutdown state update will be ignored - // by balancer. - // - // TODO(bar) the state transition could be wrong when tearDown() old ac - // and creating new ac, fix the transition. - acbw.ac.acbw = nil - acbw.ac.mu.Unlock() - acState := acbw.ac.getState() - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - - if acState == connectivity.Shutdown { - return - } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} - ac, err := cc.newAddrConn(addrs, opts) - if err != nil { - channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) - return - } - acbw.ac = ac - ac.mu.Lock() - ac.acbw = acbw - ac.mu.Unlock() - if acState != connectivity.Idle { - ac.connect() - } +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) } -func (acbw *acBalancerWrapper) Connect() { - acbw.mu.Lock() - defer acbw.mu.Unlock() - acbw.ac.connect() +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function } -func (acbw *acBalancerWrapper) getAddrConn() *addrConn { +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { acbw.mu.Lock() defer acbw.mu.Unlock() - return acbw.ac + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. 
+ unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) } diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ed75290cd..ec2c2fa14 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,14 +18,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -41,10 +40,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. @@ -261,6 +256,7 @@ type GrpcLogEntry struct { // according to the type of the log entry. // // Types that are assignable to Payload: + // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader // *GrpcLogEntry_Message @@ -694,12 +690,12 @@ func (x *Message) GetData() []byte { // Header keys added by gRPC are omitted. To be more specific, // implementations will not log the following entries, and this is // not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials // // Implementations must always log grpc-trace-bin if it is present. // Practically speaking it will only be visible on server side because diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index 9e20e4d38..e6a1dc5d7 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -27,6 +27,11 @@ import ( // // All errors returned by Invoke are compatible with the status package. 
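GetOrBuildProducer above hands out one shared Producer per (SubConn, ProducerBuilder) pair and reference-counts it, running the builder's close function once the last caller releases its cleanup. A hedged sketch of a builder that fits that contract; watchProducer and watchBuilder are hypothetical names, and the SubConn usage is shown only as a comment:

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
)

// watchProducer is a hypothetical per-SubConn Producer shared by all callers.
type watchProducer struct{ stop chan struct{} }

type watchBuilder struct{}

var _ balancer.ProducerBuilder = watchBuilder{}

// Build creates the Producer plus the close function that GetOrBuildProducer
// runs once the reference count drops to zero.
func (watchBuilder) Build(grpcClientConnInterface interface{}) (balancer.Producer, func()) {
	p := &watchProducer{stop: make(chan struct{})}
	return p, func() {
		close(p.stop)
		fmt.Println("producer closed: last reference released")
	}
}

// Inside a balancer, given a SubConn sc, usage would look like:
//
//	p, cleanup := sc.GetOrBuildProducer(watchBuilder{})
//	defer cleanup() // drop this caller's reference
//	_ = p.(*watchProducer)

func main() {
	p, cleanup := watchBuilder{}.Build(nil)
	_ = p.(*watchProducer)
	cleanup()
}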
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + if err := cc.idlenessMgr.onCallBegin(); err != nil { + return err + } + defer cc.idlenessMgr.onCallEnd() + // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 000000000..32b7fa579 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index b2bccfed1..95a7459b0 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -23,7 +23,7 @@ import ( "errors" "fmt" "math" - "reflect" + "net/url" "strings" "sync" "sync/atomic" @@ -37,7 +37,6 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/grpcutil" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -69,6 +68,9 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") + // errConnIdling indicates the the connection is being closed as the channel + // is moving to an idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -79,17 +81,17 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. 
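The onCallBegin/onCallEnd calls added to Invoke above belong to the internal idleness manager: every RPC bumps an activity count so the channel only goes idle once no calls have been in flight for the configured timeout. A minimal conceptual sketch of that bookkeeping, assuming nothing about the real idlenessManager beyond what the diff shows; the timer side is elided:

package main

import (
	"fmt"
	"sync/atomic"
)

// callTracker counts in-flight RPCs; a channel is only a candidate for idle
// mode while the count sits at zero.
type callTracker struct{ inFlight int32 }

func (t *callTracker) onCallBegin() { atomic.AddInt32(&t.inFlight, 1) }
func (t *callTracker) onCallEnd()   { atomic.AddInt32(&t.inFlight, -1) }

func (t *callTracker) idleCandidate() bool {
	return atomic.LoadInt32(&t.inFlight) == 0
}

func main() {
	var t callTracker
	t.onCallBegin()
	fmt.Println(t.idleCandidate()) // false: an RPC is in flight
	t.onCallEnd()
	fmt.Println(t.idleCandidate()) // true: eligible for idleness after the timeout
}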
- errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") - // errTransportCredentialsMissing indicates that users want to transmit security - // information (e.g., OAuth2 token) which requires secure connection on an insecure - // connection. + // errNoTransportCredsInBundle indicated that the configured creds bundle + // returned a transport credentials which was nil. + errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials") + // errTransportCredentialsMissing indicates that users want to transmit + // security information (e.g., OAuth2 token) which requires secure + // connection on an insecure connection. errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") - // errCredentialsConflict indicates that grpc.WithTransportCredentials() - // and grpc.WithInsecure() are both called for a connection. - errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") ) const ( @@ -134,17 +136,43 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - blockingpicker: newPickerWrapper(), - czData: new(channelzData), - firstResolveEvent: grpcsync.NewEvent(), - } + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), + } + + // We start the channel off in idle mode, but kick it out of idle at the end + // of this method, instead of waiting for the first RPC. Other gRPC + // implementations do wait for the first RPC to kick the channel out of + // idle. But doing so would be a major behavior change for our users who are + // used to seeing the channel active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, if at all we ever get to do that. 
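The updated errNoTransportSecurity message above points users at explicit insecure credentials rather than the deprecated WithInsecure dial option. A minimal dial sketch using that replacement; the target address is illustrative:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Explicit insecure transport credentials, as suggested by the new error text.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}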
+ cc.idlenessState = ccIdlenessStateIdle + cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + cc.exitIdleCond = sync.NewCond(&cc.mu) + + disableGlobalOpts := false + for _, opt := range opts { + if _, ok := opt.(*disableGlobalDialOptions); ok { + disableGlobalOpts = true + break + } + } + + if !disableGlobalOpts { + for _, opt := range globalDialOptions { + opt.apply(&cc.dopts) + } + } for _, opt := range opts { opt.apply(&cc.dopts) @@ -159,40 +187,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtInfo, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") - } - cc.csMgr.channelzID = cc.channelzID - } + // Register ClientConn with channelz. + cc.channelzRegistration(target) - if !cc.dopts.insecure { - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - } else { - if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { - return nil, errCredentialsConflict - } - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return nil, errTransportCredentialsMissing - } - } + if err := cc.validateTransportCredentials(); err != nil { + return nil, err } if cc.dopts.defaultServiceConfigRawJSON != nil { @@ -230,58 +229,19 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - scSet := false - if cc.dopts.scChan != nil { - // Try to get an initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - scSet = true - } - default: - } - } if cc.dopts.bs == nil { cc.dopts.bs = backoff.DefaultExponential } // Determine the resolver to use. - cc.parsedTarget = grpcutil.ParseTarget(cc.target, cc.dopts.copts.Dialer != nil) - channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) - resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original target. 
- channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, - } - resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) - } + if err := cc.parseTargetAndFindResolver(); err != nil { + return nil, err } - - creds := cc.dopts.copts.TransportCredentials - if creds != nil && creds.Info().ServerName != "" { - cc.authority = creds.Info().ServerName - } else if cc.dopts.insecure && cc.dopts.authority != "" { - cc.authority = cc.dopts.authority - } else if strings.HasPrefix(cc.target, "unix:") || strings.HasPrefix(cc.target, "unix-abstract:") { - cc.authority = "localhost" - } else if strings.HasPrefix(cc.parsedTarget.Endpoint, ":") { - cc.authority = "localhost" + cc.parsedTarget.Endpoint - } else { - // Use endpoint from "scheme://authority/endpoint" as the default - // authority for ClientConn. - cc.authority = cc.parsedTarget.Endpoint + if err = cc.determineAuthority(); err != nil { + return nil, err } - if cc.dopts.scChan != nil && !scSet { + if cc.dopts.scChan != nil { // Blocking wait for the initial service config. select { case sc, ok := <-cc.dopts.scChan: @@ -297,55 +257,224 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * go cc.scWatcher() } + // This creates the name resolver, load balancer, blocking picker etc. + if err := cc.exitIdleMode(); err != nil { + return nil, err + } + + // Configure idleness support with configured idle timeout or default idle + // timeout duration. Idleness can be explicitly disabled by the user, by + // setting the dial option to 0. + cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) + + // Return early for non-blocking dials. + if !cc.dopts.block { + return cc, nil + } + + // A blocking dial blocks until the clientConn is ready. + for { + s := cc.GetState() + if s == connectivity.Idle { + cc.Connect() + } + if s == connectivity.Ready { + return cc, nil + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } +} + +// addTraceEvent is a helper method to add a trace event on the channel. If the +// channel is a nested one, the same event is also added on the parent channel. +func (cc *ClientConn) addTraceEvent(msg string) { + ted := &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel %s", msg), + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +} + +// exitIdleMode moves the channel out of idle mode by recreating the name +// resolver and load balancer. 
+func (cc *ClientConn) exitIdleMode() error { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return errConnClosing + } + if cc.idlenessState != ccIdlenessStateIdle { + cc.mu.Unlock() + logger.Info("ClientConn asked to exit idle mode when not in idle mode") + return nil + } + + defer func() { + // When Close() and exitIdleMode() race against each other, one of the + // following two can happen: + // - Close() wins the race and runs first. exitIdleMode() runs after, and + // sees that the ClientConn is already closed and hence returns early. + // - exitIdleMode() wins the race and runs first and recreates the balancer + // and releases the lock before recreating the resolver. If Close() runs + // in this window, it will wait for exitIdleMode to complete. + // + // We achieve this synchronization using the below condition variable. + cc.mu.Lock() + cc.idlenessState = ccIdlenessStateActive + cc.exitIdleCond.Signal() + cc.mu.Unlock() + }() + + cc.idlenessState = ccIdlenessStateExitingIdle + exitedIdle := false + if cc.blockingpicker == nil { + cc.blockingpicker = newPickerWrapper() + } else { + cc.blockingpicker.exitIdleMode() + exitedIdle = true + } + var credsClone credentials.TransportCredentials if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerBuildOpts = balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, + if cc.balancerWrapper == nil { + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }) + } else { + cc.balancerWrapper.exitIdleMode() } + cc.firstResolveEvent = grpcsync.NewEvent() + cc.mu.Unlock() - // Build the resolver. - rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %v", err) + // This needs to be called without cc.mu because this builds a new resolver + // which might update state or report error inline which needs to be handled + // by cc.updateResolverState() which also grabs cc.mu. + if err := cc.initResolverWrapper(credsClone); err != nil { + return err + } + + if exitedIdle { + cc.addTraceEvent("exiting idle mode") } + return nil +} + +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer and any subchannels. +func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() - cc.resolverWrapper = rWrapper + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + if cc.idlenessState != ccIdlenessStateActive { + logger.Error("ClientConn asked to enter idle mode when not active") + return nil + } + + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + conns := cc.conns + cc.conns = make(map[*addrConn]struct{}) + + // TODO: Currently, we close the resolver wrapper upon entering idle mode + // and create a new one upon exiting idle mode. This means that the + // `cc.resolverWrapper` field would be overwritten everytime we exit idle + // mode. 
While this means that we need to hold `cc.mu` when accessing + // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should + // try to do the same for the balancer and picker wrappers too. + cc.resolverWrapper.close() + cc.blockingpicker.enterIdleMode() + cc.balancerWrapper.enterIdleMode() + cc.csMgr.updateState(connectivity.Idle) + cc.idlenessState = ccIdlenessStateIdle cc.mu.Unlock() - // A blocking dial blocks until the clientConn is ready. - if cc.dopts.block { - for { - s := cc.GetState() - if s == connectivity.Ready { - break - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { - return nil, err - } - return nil, ctx.Err() + go func() { + cc.addTraceEvent("entering idle mode") + for ac := range conns { + ac.tearDown(errConnIdling) + } + }() + return nil +} + +// validateTransportCredentials performs a series of checks on the configured +// transport credentials. It returns a non-nil error if any of these conditions +// are met: +// - no transport creds and no creds bundle is configured +// - both transport creds and creds bundle are configured +// - creds bundle is configured, but it lacks a transport credentials +// - insecure transport creds configured alongside call creds that require +// transport level security +// +// If none of the above conditions are met, the configured credentials are +// deemed valid and a nil error is returned. +func (cc *ClientConn) validateTransportCredentials() error { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing } } } + return nil +} - return cc, nil +// channelzRegistration registers the newly created ClientConn with channelz and +// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. +// A channelz trace event is emitted for ClientConn creation. If the newly +// created ClientConn is a nested one, i.e a valid parent ClientConn ID is +// specified via a dial option, the trace event is also added to the parent. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) channelzRegistration(target string) { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + cc.addTraceEvent("created") + cc.csMgr.channelzID = cc.channelzID } // chainUnaryClientInterceptors chains all unary client interceptors into one. 
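As an aside on how the reworked Dial path in this hunk is exercised by callers: transport security must now be configured explicitly, since validateTransportCredentials rejects a channel that has neither transport credentials nor a credentials bundle (and also rejects having both). A minimal sketch, not part of this patch, assuming a hypothetical server at localhost:50051; it uses the insecure credentials package added later in this diff, and WithBlock to opt into the blocking-dial loop shown above:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Bound how long the blocking dial may take.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// insecure.NewCredentials() satisfies the "exactly one of transport
	// creds or a creds bundle" check for plaintext connections.
	// WithBlock opts into waiting until the channel reports READY;
	// without it, DialContext returns as soon as setup completes.
	conn, err := grpc.DialContext(ctx, "localhost:50051", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}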
@@ -416,7 +545,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelzID *channelz.Identifier } // updateState updates the connectivity.State of ClientConn. @@ -482,43 +611,67 @@ var _ ClientConnInterface = (*ClientConn)(nil) // handshakes. It also handles errors on established connections by // re-resolving the name and reconnecting. type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + idlenessMgr idlenessManager + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number - czData *channelzData + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + idlenessState ccIdlenessState // Tracks idleness state of the channel. + exitIdleCond *sync.Cond // Signalled when channel exits idle. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } +// ccIdlenessState tracks the idleness state of the channel. +// +// Channels start off in `active` and move to `idle` after a period of +// inactivity. When moving back to `active` upon an incoming RPC, they +// transition through `exiting_idle`. This state is useful for synchronization +// with Close(). +// +// This state tracking is mostly for self-protection. The idlenessManager is +// expected to keep track of the state as well, and is expected not to call into +// the ClientConn unnecessarily. 
+type ccIdlenessState int8 + +const ( + ccIdlenessStateActive ccIdlenessState = iota + ccIdlenessStateIdle + ccIdlenessStateExitingIdle +) + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -537,14 +690,29 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec // GetState returns the connectivity.State of ClientConn. // -// Experimental +// # Experimental // -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } +// Connect causes all subchannels in the ClientConn to attempt to connect if +// the channel is idle. Does not wait for the connection attempts to begin +// before returning. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) Connect() { + cc.exitIdleMode() + // If the ClientConn was not in idle mode, we need to call ExitIdle on the + // LB policy so that connections can be created. + cc.balancerWrapper.exitIdleMode() +} + func (cc *ClientConn) scWatcher() { for { select { @@ -622,9 +790,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) - if cc.balancerWrapper != nil { - cc.balancerWrapper.resolverError(err) - } + cc.balancerWrapper.resolverError(err) // No addresses are valid with err set; return early. cc.mu.Unlock() @@ -632,7 +798,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var ret error - if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + if cc.dopts.disableServiceConfig { + channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig(s.Addresses) + } else if s.ServiceConfig == nil { cc.maybeApplyDefaultServiceConfig(s.Addresses) // TODO: do we need to apply a failing LB policy if there is no // default, per the error handling design? @@ -649,16 +818,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState - if cc.balancerWrapper == nil { - var err error - if s.ServiceConfig.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) - } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) - } - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. 
+ cc.applyFailingLB(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -666,24 +829,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } - - cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() - if cbn != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is @@ -692,56 +843,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. -// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. +// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. // // Caller must hold cc.mu. -func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return - } - - channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") - return - } - if cc.balancerWrapper != nil { - // Don't hold cc.mu while closing the balancers. The balancers may call - // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex - // would cause a deadlock in that case. 
- cc.mu.Unlock() - cc.balancerWrapper.close() - cc.mu.Lock() - } - - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) } else { - channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) } func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s, err) - cc.mu.Unlock() + cc.balancerWrapper.updateSubConnState(sc, s, err) } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. @@ -756,27 +879,31 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub dopts: cc.dopts, czData: new(channelzData), resetBackoff: make(chan struct{}), + stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.mu.Lock() + defer cc.mu.Unlock() if cc.conns == nil { - cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + cc.conns[ac] = struct{}{} - cc.mu.Unlock() return ac, nil } @@ -806,7 +933,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { // Target returns the target string of the ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
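The blocking-dial loop earlier in this patch calls the newly added experimental Connect method whenever it observes the channel in IDLE. Callers that dial without WithBlock can drive the same state machine themselves; a rough sketch under that assumption (the package and helper name are illustrative, not part of this change):

package clientconnx

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// WaitReady is an illustrative helper (not part of this patch) that mirrors
// the blocking-dial loop: it calls Connect whenever the channel is idle and
// then waits for a state change, returning the context error if the
// deadline expires before the channel becomes READY.
func WaitReady(ctx context.Context, conn *grpc.ClientConn) error {
	for {
		s := conn.GetState()
		if s == connectivity.Ready {
			return nil
		}
		if s == connectivity.Idle {
			conn.Connect() // does not wait for the connection attempt to begin
		}
		if !conn.WaitForStateChange(ctx, s) {
			return ctx.Err()
		}
	}
}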
@@ -833,67 +960,113 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } ac.mu.Unlock() return nil } - // Update connectivity state within the lock to prevent subsequent or - // concurrent calls from resetting the transport more than once. - ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - // Start a goroutine connecting to the server asynchronously. - go ac.resetTransport() + ac.resetTransport() return nil } -// tryUpdateAddrs tries to update ac.addrs with the new addresses list. -// -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// -// If ac is TransientFailure, it updates ac.addrs and returns true. The updated -// addresses will be picked up by retry in the next iteration after backoff. -// -// If ac is Shutdown or Idle, it updates ac.addrs and returns true. -// -// If ac is Ready, it checks whether current connected address of ac is in the -// new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. -func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + +// updateAddrs updates ac.addrs with the new addresses list and handles active +// connections or connection attempts. +func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - defer ac.mu.Unlock() - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + + if equalAddresses(ac.addrs, addrs) { + ac.mu.Unlock() + return + } + + ac.addrs = addrs + if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { - ac.addrs = addrs - return true + // We were not connecting, so do nothing but update the addresses. + ac.mu.Unlock() + return } - if ac.state == connectivity.Connecting { - return false + if ac.state == connectivity.Ready { + // Try to find the connected address. + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if a.Equal(ac.curAddr) { + // We are connected to a valid address, so do nothing but + // update the addresses. + ac.mu.Unlock() + return + } + } } - // ac.state is Ready, try to find the connected address. - var curAddrFound bool - for _, a := range addrs { - if reflect.DeepEqual(ac.curAddr, a) { - curAddrFound = true - break - } + // We are either connected to the wrong address or currently connecting. + // Stop the current iteration and restart. + + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + + // We have to defer here because GracefulClose => Close => onClose, which + // requires locking ac.mu. 
+ if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil } - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) - if curAddrFound { - ac.addrs = addrs + + if len(addrs) == 0 { + ac.updateConnectivityState(connectivity.Idle, nil) } - return curAddrFound + ac.mu.Unlock() + + // Since we were connecting/connected, we should start a new connection + // attempt. + go ac.resetTransport() +} + +// getServerName determines the serverName to be used in the connection +// handshake. The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. +func (cc *ClientConn) getServerName(addr resolver.Address) string { + if cc.dopts.authority != "" { + return cc.dopts.authority + } + if addr.ServerName != "" { + return addr.ServerName + } + return cc.authority } func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { @@ -934,15 +1107,11 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) - if err != nil { - return nil, nil, toRPCErr(err) - } - return t, done, nil } func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { @@ -967,35 +1136,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel cc.retryThrottler.Store((*retryThrottler)(nil)) } - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. - var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. 
- cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } + cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1017,7 +1177,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { // However, if a previously unavailable network becomes available, this may be // used to trigger an immediate reconnect. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1039,44 +1199,45 @@ func (cc *ClientConn) Close() error { cc.mu.Unlock() return ErrClientConnClosing } + + for cc.idlenessState == ccIdlenessStateExitingIdle { + cc.exitIdleCond.Wait() + } + conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) + pWrapper := cc.blockingpicker rWrapper := cc.resolverWrapper - cc.resolverWrapper = nil bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil + idlenessMgr := cc.idlenessMgr cc.mu.Unlock() - cc.blockingpicker.close() - + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. + if pWrapper != nil { + pWrapper.close() + } if bWrapper != nil { bWrapper.close() } if rWrapper != nil { rWrapper.close() } + if idlenessMgr != nil { + idlenessMgr.close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) - } + cc.addTraceEvent("deleted") + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + return nil } @@ -1101,12 +1262,13 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State + state connectivity.State + stateChan chan struct{} // closed and recreated on every state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. + channelzID *channelz.Identifier czData *channelzData } @@ -1115,8 +1277,15 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } + // When changing states, reset the state change channel. 
+ close(ac.stateChan) + ac.stateChan = make(chan struct{}) ac.state = s - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + if lastErr == nil { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + } else { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) + } ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } @@ -1135,113 +1304,86 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } func (ac *addrConn) resetTransport() { - for i := 0; ; i++ { - if i > 0 { - ac.cc.resolveNow(resolver.ResolveNowOptions{}) - } + ac.mu.Lock() + acCtx := ac.ctx + if acCtx.Err() != nil { + ac.mu.Unlock() + return + } - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } - addrs := ac.addrs - backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) - // This will be the duration that dial gets to finish. - dialDuration := minConnectTimeout - if ac.dopts.minConnectTimeout != nil { - dialDuration = ac.dopts.minConnectTimeout() - } + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) - if dialDuration < backoffFor { - // Give dial more time as we keep failing to connect. - dialDuration = backoffFor + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. + if acCtx.Err() != nil { + return } - // We can potentially spend all the time trying the first address, and - // if the server accepts the connection and then hangs, the following - // addresses will never be tried. - // - // The spec doesn't mention what should be done for multiple addresses. - // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm - connectDeadline := time.Now().Add(dialDuration) + ac.mu.Lock() + ac.updateConnectivityState(connectivity.TransientFailure, err) - ac.updateConnectivityState(connectivity.Connecting, nil) - ac.transport = nil + // Backoff. + b := ac.resetBackoff ac.mu.Unlock() - newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) - if err != nil { - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - ac.updateConnectivityState(connectivity.TransientFailure, err) - - // Backoff. 
- b := ac.resetBackoff + ac.backoffIdx++ ac.mu.Unlock() - - timer := time.NewTimer(backoffFor) - select { - case <-timer.C: - ac.mu.Lock() - ac.backoffIdx++ - ac.mu.Unlock() - case <-b: - timer.Stop() - case <-ac.ctx.Done(): - timer.Stop() - return - } - continue + case <-b: + timer.Stop() + case <-acCtx.Done(): + timer.Stop() + return } ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - newTr.Close(fmt.Errorf("reached connectivity state: SHUTDOWN")) - return + if acCtx.Err() == nil { + ac.updateConnectivityState(connectivity.Idle, err) } - ac.curAddr = addr - ac.transport = newTr - ac.backoffIdx = 0 - - hctx, hcancel := context.WithCancel(ac.ctx) - ac.startHealthCheck(hctx) ac.mu.Unlock() - - // Block until the created transport is down. And when this happens, - // we restart from the top of the addr list. - <-reconnect.Done() - hcancel() - // restart connecting - the top of the loop will set state to - // CONNECTING. This is against the current connectivity semantics doc, - // however it allows for graceful behavior for RPCs not yet dispatched - // - unfortunate timing would otherwise lead to the RPC failing even - // though the TRANSIENT_FAILURE state (called for by the doc) would be - // instantaneous. - // - // Ideally we should transition to Idle here and block until there is - // RPC activity that leads to the balancer requesting a reconnect of - // the associated SubConn. + return } + // Success; reset backoff. + ac.mu.Lock() + ac.backoffIdx = 0 + ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at the -// first successful one. It returns the transport, the address and a Event in -// the successful case. The Event fires when the returned transport disconnects. -func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { +// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// the first successful one. It returns an error if no address was successfully +// connected, or updates ac appropriately with the new transport. +func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return nil, resolver.Address{}, nil, errConnClosing + if ctx.Err() != nil { + return errConnClosing } + ac.mu.Lock() ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp @@ -1255,9 +1397,9 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(ctx, addr, copts, connectDeadline) if err == nil { - return newTr, addr, reconnect, nil + return nil } if firstConnErr == nil { firstConnErr = err @@ -1266,86 +1408,90 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } // Couldn't connect to any address. - return nil, resolver.Address{}, nil, firstConnErr + return firstConnErr } -// createTransport creates a connection to addr. It returns the transport and a -// Event in the successful case. The Event fires when the returned transport -// disconnects. 
-func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { - prefaceReceived := make(chan struct{}) - onCloseCalled := make(chan struct{}) - reconnect := grpcsync.NewEvent() - - // addr.ServerName takes precedent over ClientConn authority, if present. - if addr.ServerName == "" { - addr.ServerName = ac.cc.authority - } +// createTransport creates a connection to addr. It returns an error if the +// address was not successfully connected, or updates ac appropriately with the +// new transport. +func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + addr.ServerName = ac.cc.getServerName(addr) + hctx, hcancel := context.WithCancel(ctx) - once := sync.Once{} - onGoAway := func(r transport.GoAwayReason) { + onClose := func(r transport.GoAwayReason) { ac.mu.Lock() + defer ac.mu.Unlock() + // adjust params based on GoAwayReason ac.adjustParams(r) - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) - ac.mu.Unlock() - reconnect.Fire() - } - - onClose := func() { - ac.mu.Lock() - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) - ac.mu.Unlock() - close(onCloseCalled) - reconnect.Fire() - } - - onPrefaceReceipt := func() { - close(prefaceReceived) + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. + return + } + hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. + return + } + ac.transport = nil + // Refresh the name resolver on any connection loss. + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. + ac.updateConnectivityState(connectivity.Idle, nil) } - connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) - return nil, nil, err + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. 
Err: %v", addr, err) + return err } - select { - case <-time.After(time.Until(connectDeadline)): - // We didn't get the preface in time. - newTr.Close(fmt.Errorf("failed to receive server preface within timeout")) - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) - return nil, nil, errors.New("timed out waiting for server handshake") - case <-prefaceReceived: - // We got the preface - huzzah! things are good. - case <-onCloseCalled: - // The transport has already closed - noop. - return nil, nil, errors.New("connection closed") - // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + ac.mu.Lock() + defer ac.mu.Unlock() + if ctx.Err() != nil { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + // + // This can also happen when updateAddrs is called during a connection + // attempt. + go newTr.Close(transport.ErrConnClosing) + return nil } - return newTr, reconnect, nil + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) + return nil + } + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil } // startHealthCheck starts the health checking stream (RPC) to watch the health @@ -1415,7 +1561,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) } } }() @@ -1439,6 +1585,29 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. If the +// addrConn is stopped first, returns an Unavailable status error. +func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } + } + return nil, status.FromContextError(ctx.Err()).Err() +} + // tearDown starts to tear down the addrConn. 
// // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct @@ -1466,19 +1635,18 @@ func (ac *addrConn) tearDown(err error) { curTr.GracefulClose() ac.mu.Lock() } - if channelz.IsOn() { - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() } @@ -1567,6 +1735,9 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { // referenced by users. var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") +// getResolver finds the scheme in the cc's resolvers or the global registry. +// scheme should always be lowercase (typically by virtue of url.Parse() +// performing proper RFC3986 behavior). func (cc *ClientConn) getResolver(scheme string) resolver.Builder { for _, rb := range cc.dopts.resolvers { if scheme == rb.Scheme() { @@ -1587,3 +1758,151 @@ func (cc *ClientConn) connectionError() error { defer cc.lceMu.Unlock() return cc.lastConnectionError } + +// parseTargetAndFindResolver parses the user's dial target and stores the +// parsed target in `cc.parsedTarget`. +// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) parseTargetAndFindResolver() error { + channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) + } else { + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. 
+ defScheme := resolver.GetDefaultScheme() + channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) + return err + } + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb == nil { + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + } + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing scheme, authority and url. Query +// params are stripped from the endpoint. +func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + + return resolver.Target{ + Scheme: u.Scheme, + Authority: u.Host, + URL: *u, + }, nil +} + +// Determine channel authority. The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +// +// Stores the determined authority in `cc.authority`. +// +// Returns a non-nil error if the authority returned by the transport +// credentials do not match the authority configured through the dial option. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) determineAuthority() error { + dopts := cc.dopts + // Historically, we had two options for users to specify the serverName or + // authority for a channel. One was through the transport credentials + // (either in its constructor, or through the OverrideServerName() method). + // The other option (for cases where WithInsecure() dial option was used) + // was to use the WithAuthority() dial option. + // + // A few things have changed since: + // - `insecure` package with an implementation of the `TransportCredentials` + // interface for the insecure case + // - WithAuthority() dial option support for secure credentials + authorityFromCreds := "" + if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { + authorityFromCreds = creds.Info().ServerName + } + authorityFromDialOption := dopts.authority + if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { + return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + endpoint := cc.parsedTarget.Endpoint() + target := cc.target + switch { + case authorityFromDialOption != "": + cc.authority = authorityFromDialOption + case authorityFromCreds != "": + cc.authority = authorityFromCreds + case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): + // TODO: remove when the unix resolver implements optional interface to + // return channel authority. + cc.authority = "localhost" + case strings.HasPrefix(endpoint, ":"): + cc.authority = "localhost" + endpoint + default: + // TODO: Define an optional interface on the resolver builder to return + // the channel authority given the user's dial target. 
For resolvers + // which don't implement this interface, we will use the endpoint from + // "scheme://authority/endpoint" as the default authority. + cc.authority = endpoint + } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + return nil +} + +// initResolverWrapper creates a ccResolverWrapper, which builds the name +// resolver. This method grabs the lock to assign the newly built resolver +// wrapper to the cc.resolverWrapper field. +func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { + rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ + target: cc.parsedTarget, + builder: cc.resolverBuilder, + bOpts: resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: creds, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + }, + channelzID: cc.channelzID, + }) + if err != nil { + return fmt.Errorf("failed to build resolver: %v", err) + } + // Resolver implementations may report state update or error inline when + // built (or right after), and this is handled in cc.updateResolverState. + // Also, an error from the resolver might lead to a re-resolution request + // from the balancer, which is handled in resolveNow() where + // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. + cc.mu.Lock() + cc.resolverWrapper = rw + cc.mu.Unlock() + return nil +} diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go index 0b206a578..934fac2b0 100644 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -18,7 +18,15 @@ package codes -import "strconv" +import ( + "strconv" + + "google.golang.org/grpc/internal" +) + +func init() { + internal.CanonicalString = canonicalString +} func (c Code) String() string { switch c { @@ -60,3 +68,44 @@ func (c Code) String() string { return "Code(" + strconv.FormatInt(int64(c), 10) + ")" } } + +func canonicalString(c Code) string { + switch c { + case OK: + return "OK" + case Canceled: + return "CANCELLED" + case Unknown: + return "UNKNOWN" + case InvalidArgument: + return "INVALID_ARGUMENT" + case DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case NotFound: + return "NOT_FOUND" + case AlreadyExists: + return "ALREADY_EXISTS" + case PermissionDenied: + return "PERMISSION_DENIED" + case ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case FailedPrecondition: + return "FAILED_PRECONDITION" + case Aborted: + return "ABORTED" + case OutOfRange: + return "OUT_OF_RANGE" + case Unimplemented: + return "UNIMPLEMENTED" + case Internal: + return "INTERNAL" + case Unavailable: + return "UNAVAILABLE" + case DataLoss: + return "DATA_LOSS" + case Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go index 010156261..4a8992642 100644 --- a/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -18,7 +18,6 @@ // Package connectivity defines connectivity semantics. // For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. -// All APIs in this package are experimental. 
package connectivity import ( @@ -45,7 +44,7 @@ func (s State) String() string { return "SHUTDOWN" default: logger.Errorf("unknown connectivity state: %d", s) - return "Invalid-State" + return "INVALID_STATE" } } @@ -61,3 +60,35 @@ const ( // Shutdown indicates the ClientConn has started shutting down. Shutdown ) + +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. +type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. + ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 7eee7e4ec..5feac3aa0 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -36,16 +36,16 @@ import ( // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. If a status code is returned, it will be used as the status - // for the RPC. uri is the URI of the entry point for the request. - // When supported by the underlying implementation, ctx can be used for - // timeout and cancellation. Additionally, RequestInfo data will be - // available via ctx to this call. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. @@ -140,6 +140,11 @@ type TransportCredentials interface { // Additionally, ClientHandshakeInfo data will be available via the context // passed to this call. 
// + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. Implementations must use this as the server name during the + // authentication handshake. + // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns @@ -153,9 +158,13 @@ type TransportCredentials interface { Info() ProtocolInfo // Clone makes a copy of this TransportCredentials. Clone() TransportCredentials - // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. - // gRPC internals also use it to override the virtual hosting name if it is set. - // It must be called before dialing. Currently, this is only used by grpclb. + // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. OverrideServerName(string) error } @@ -169,8 +178,18 @@ type TransportCredentials interface { // // This API is experimental. type Bundle interface { + // TransportCredentials returns the transport credentials from the Bundle. + // + // Implementations must return non-nil transport credentials. If transport + // security is not needed by the Bundle, implementations may choose to + // return insecure.NewCredentials(). TransportCredentials() TransportCredentials + + // PerRPCCredentials returns the per-RPC credentials from the Bundle. + // + // May be nil if per-RPC credentials are not needed. PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the // existing Bundle may cause races. // diff --git a/vendor/google.golang.org/grpc/credentials/go12.go b/vendor/google.golang.org/grpc/credentials/go12.go deleted file mode 100644 index ccbf35b33..000000000 --- a/vendor/google.golang.org/grpc/credentials/go12.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build go1.12 - -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package credentials - -import "crypto/tls" - -// This init function adds cipher suite constants only defined in Go 1.12. 
-func init() { - cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" - cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" - cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" -} diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go new file mode 100644 index 000000000..82bee1443 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package insecure provides an implementation of the +// credentials.TransportCredentials interface which disables transport security. +package insecure + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// NewCredentials returns a credentials which disables transport security. +// +// Note that using this credentials with per-RPC credentials which require +// transport security is incompatible and will cause grpc.Dial() to fail. +func NewCredentials() credentials.TransportCredentials { + return insecureTC{} +} + +// insecureTC implements the insecure transport credentials. The handshake +// methods simply return the passed in net.Conn and set the security level to +// NoSecurity. +type insecureTC struct{} + +func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{SecurityProtocol: "insecure"} +} + +func (insecureTC) Clone() credentials.TransportCredentials { + return insecureTC{} +} + +func (insecureTC) OverrideServerName(string) error { + return nil +} + +// info contains the auth information for an insecure connection. +// It implements the AuthInfo interface. +type info struct { + credentials.CommonAuthInfo +} + +// AuthType returns the type of info as a string. +func (info) AuthType() string { + return "insecure" +} + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns an nil implementation as insecure +// bundle does not support a per rpc credential. 
+func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. +func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 8ee7124f2..877b7cd21 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -23,9 +23,9 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" "net/url" + "os" credinternal "google.golang.org/grpc/internal/credentials" ) @@ -166,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // it will override the virtual host name of authority (e.g. :authority header // field) in requests. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { - b, err := ioutil.ReadFile(certFile) + b, err := os.ReadFile(certFile) if err != nil { return nil, err } @@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error // TLSChannelzSecurityValue defines the struct that TLS protocol should return // from GetSecurityValue(), containing security info like cipher and certificate used. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -230,4 +230,7 @@ var cipherSuiteLookup = map[uint16]string{ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", + tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", + tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 7a497237b..15a3d5102 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -20,22 +20,34 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) +func init() { + internal.AddGlobalDialOptions = func(opt ...DialOption) { + globalDialOptions = append(globalDialOptions, opt...) + } + internal.ClearGlobalDialOptions = func() { + globalDialOptions = nil + } + internal.WithBinaryLogger = withBinaryLogger + internal.JoinDialOptions = newJoinDialOption + internal.DisableGlobalDialOptions = newDisableGlobalDialOptions +} + // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. 
type dialOptions struct { @@ -45,20 +57,18 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by WithBalancerName dial option. - balancerBuilder balancer.Builder - channelzParentID int64 + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + binaryLogger binarylog.Logger + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -67,6 +77,7 @@ type dialOptions struct { defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string resolvers []resolver.Builder + idleTimeout time.Duration } // DialOption configures how we set up the connection. @@ -74,10 +85,12 @@ type DialOption interface { apply(*dialOptions) } +var globalDialOptions []DialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -85,6 +98,16 @@ type EmptyDialOption struct{} func (EmptyDialOption) apply(*dialOptions) {} +type disableGlobalDialOptions struct{} + +func (disableGlobalDialOptions) apply(*dialOptions) {} + +// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn +// from applying the global DialOptions (set via AddGlobalDialOptions). +func newDisableGlobalDialOptions() DialOption { + return &disableGlobalDialOptions{} +} + // funcDialOption wraps a function that modifies dialOptions into an // implementation of the DialOption interface. type funcDialOption struct { @@ -101,13 +124,28 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { } } +type joinDialOption struct { + opts []DialOption +} + +func (jdo *joinDialOption) apply(do *dialOptions) { + for _, opt := range jdo.opts { + opt.apply(do) + } +} + +func newJoinDialOption(opts ...DialOption) DialOption { + return &joinDialOption{opts: opts} +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -117,8 +155,9 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. 
Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. +// The default value for this buffer is 32KB. Zero or negative values will +// disable read buffer for a connection so data framer can access the +// underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s @@ -196,25 +235,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -228,18 +248,14 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption { }) } -// WithConnectParams configures the dialer to use the provided ConnectParams. +// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. // // The backoff configuration specified as part of the ConnectParams overrides // all defaults specified in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider // using the backoff.DefaultConfig as a base, in cases where you want to // override only a subset of the backoff configuration. -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func WithConnectParams(p ConnectParams) DialOption { return newFuncDialOption(func(o *dialOptions) { o.bs = internalbackoff.Exponential{Config: p.Backoff} @@ -277,9 +293,12 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { }) } -// WithBlock returns a DialOption which makes caller of Dial blocks until the +// WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -291,7 +310,10 @@ func WithBlock() DialOption { // the context.DeadlineExceeded error. // Implies WithBlock() // -// Experimental +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -303,18 +325,24 @@ func WithReturnConnectionError() DialOption { } // WithInsecure returns a DialOption which disables transport security for this -// ClientConn. Note that transport security is required unless WithInsecure is -// set. +// ClientConn. 
Under the hood, it uses insecure.NewCredentials(). +// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.insecure = true + o.copts.TransportCredentials = insecure.NewCredentials() }) } // WithNoProxy returns a DialOption which disables the use of proxies for this // ClientConn. This is ignored if WithDialer or WithContextDialer are used. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -345,7 +373,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // the ClientConn.WithCreds. This should not be used together with // WithTransportCredentials. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -401,7 +429,21 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { // all the RPCs and underlying network connections in this ClientConn. func WithStatsHandler(h stats.Handler) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.StatsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) + }) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl }) } @@ -413,7 +455,10 @@ func WithStatsHandler(h stats.Handler) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // -// Experimental +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -482,8 +527,7 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt } // WithAuthority returns a DialOption that specifies the value to be used as the -// :authority pseudo-header. This value only works with WithInsecure and has no -// effect if TransportCredentials are present. +// :authority pseudo-header and as the server name in authentication handshake. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a @@ -494,11 +538,11 @@ func WithAuthority(a string) DialOption { // current ClientConn's parent. This function is used in nested channel creation // (e.g. grpclb dial). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
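With WithInsecure deprecated in favor of explicit transport credentials, and WithAuthority now feeding both the :authority pseudo-header and the handshake server name, a typical dial call looks roughly like the sketch below; the target and authority values are placeholders, and a TLS client would pass credentials from credentials.NewClientTLSFromFile (updated above to use os.ReadFile) instead of the insecure ones:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Replacement for the deprecated grpc.WithInsecure(): pass explicit
	// insecure transport credentials instead.
	conn, err := grpc.Dial(
		"localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// WithAuthority now sets both the :authority pseudo-header and the
		// server name used during the authentication handshake.
		grpc.WithAuthority("example.internal"),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```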
-func WithChannelzParentID(id int64) DialOption { +func WithChannelzParentID(id *channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) @@ -519,14 +563,16 @@ func WithDisableServiceConfig() DialOption { // WithDefaultServiceConfig returns a DialOption that configures the default // service config, which will be used in cases where: // -// 1. WithDisableServiceConfig is also used. -// 2. Resolver does not return a service config or if the resolver returns an -// invalid service config. +// 1. WithDisableServiceConfig is also used, or // -// Experimental +// 2. The name resolver does not provide a service config or provides an +// invalid service config. // -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// The parameter s is the JSON representation of the default service config. +// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go func WithDefaultServiceConfig(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.defaultServiceConfigRawJSON = &s @@ -537,15 +583,6 @@ func WithDefaultServiceConfig(s string) DialOption { // service config enables them. This does not impact transparent retries, which // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. -// -// Retry support is currently disabled by default, but will be enabled by -// default in the future. Until then, it may be enabled by setting the -// environment variable "GRPC_GO_RETRY" to "on". -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true @@ -563,7 +600,7 @@ func WithMaxHeaderListSize(s uint32) DialOption { // WithDisableHealthCheck disables the LB channel health checking for all // SubConns of this ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -585,7 +622,6 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - disableRetry: !envconfig.Retry, healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ WriteBufferSize: defaultWriteBufSize, @@ -611,7 +647,7 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { // resolver.Register. They will be matched against the scheme used for the // current Dial only, and will take precedence over the global registry. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -620,3 +656,23 @@ func WithResolvers(rs ...resolver.Builder) DialOption { o.resolvers = append(o.resolvers, rs...) }) } + +// WithIdleTimeout returns a DialOption that configures an idle timeout for the +// channel. If the channel is idle for the configured timeout, i.e there are no +// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode +// and as a result the name resolver and load balancer will be shut down. The +// channel will exit idle mode when the Connect() method is called or when an +// RPC is initiated. 
+// +// By default this feature is disabled, which can also be explicitly configured +// by passing zero to this function. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithIdleTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.idleTimeout = d + }) +} diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 6d84f74c7..07a586135 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -19,7 +19,7 @@ // Package encoding defines the interface for the compressor and codec, and // functions to register and retrieve compressors and codecs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. @@ -28,6 +28,8 @@ package encoding import ( "io" "strings" + + "google.golang.org/grpc/internal/grpcutil" ) // Identity specifies the optional encoding for uncompressed streams. @@ -73,6 +75,9 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c + if !grpcutil.IsCompressorNameRegistered(c.Name()) { + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + } } // GetCompressor returns Compressor for the given compressor name. @@ -108,7 +113,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect. func RegisterCodec(codec Codec) { if codec == nil { diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 4ee33171e..5de66e40d 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,11 +19,13 @@ package grpclog import ( + "encoding/json" + "fmt" "io" - "io/ioutil" "log" "os" "strconv" + "strings" "google.golang.org/grpc/internal/grpclog" ) @@ -95,8 +97,9 @@ var severityName = []string{ // loggerT is the default logger used by grpclog. type loggerT struct { - m []*log.Logger - v int + m []*log.Logger + v int + jsonFormat bool } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -105,27 +108,40 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. 
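RegisterCompressor now also records the compressor name in the internal grpcutil registry so it can be advertised to peers. A hedged sketch of a custom compressor registration, assuming a gzip-backed implementation; grpc-go already ships a real one under encoding/gzip:

```go
package gzipexample

import (
	"compress/gzip"
	"io"

	"google.golang.org/grpc/encoding"
)

// gzipCompressor is an illustrative encoding.Compressor.
type gzipCompressor struct{}

func (gzipCompressor) Name() string { return "gzip-example" }

func (gzipCompressor) Compress(w io.Writer) (io.WriteCloser, error) {
	return gzip.NewWriter(w), nil
}

func (gzipCompressor) Decompress(r io.Reader) (io.Reader, error) {
	return gzip.NewReader(r)
}

func init() {
	// Registration is init-time only and not thread-safe; with the change
	// above it also records the name so it can be offered to the peer.
	encoding.RegisterCompressor(gzipCompressor{})
}
```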
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) +} + +type loggerV2Config struct { + verbose int + jsonFormat bool +} + +func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { var m []*log.Logger - m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + flag := log.LstdFlags + if c.jsonFormat { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) - m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) - return &loggerT{m: m, v: v} + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} } // newLoggerV2 creates a loggerV2 to be used as default logger. // All logs are written to stderr. func newLoggerV2() LoggerV2 { - errorW := ioutil.Discard - warningW := ioutil.Discard - infoW := ioutil.Discard + errorW := io.Discard + warningW := io.Discard + infoW := io.Discard logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") switch logLevel { @@ -142,58 +158,79 @@ func newLoggerV2() LoggerV2 { if vl, err := strconv.Atoi(vLevel); err == nil { v = vl } - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) + + jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") + + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ + verbose: v, + jsonFormat: jsonFormat, + }) +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) } func (g *loggerT) Info(args ...interface{}) { - g.m[infoLog].Print(args...) + g.output(infoLog, fmt.Sprint(args...)) } func (g *loggerT) Infoln(args ...interface{}) { - g.m[infoLog].Println(args...) + g.output(infoLog, fmt.Sprintln(args...)) } func (g *loggerT) Infof(format string, args ...interface{}) { - g.m[infoLog].Printf(format, args...) + g.output(infoLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Warning(args ...interface{}) { - g.m[warningLog].Print(args...) + g.output(warningLog, fmt.Sprint(args...)) } func (g *loggerT) Warningln(args ...interface{}) { - g.m[warningLog].Println(args...) + g.output(warningLog, fmt.Sprintln(args...)) } func (g *loggerT) Warningf(format string, args ...interface{}) { - g.m[warningLog].Printf(format, args...) + g.output(warningLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Error(args ...interface{}) { - g.m[errorLog].Print(args...) + g.output(errorLog, fmt.Sprint(args...)) } func (g *loggerT) Errorln(args ...interface{}) { - g.m[errorLog].Println(args...) + g.output(errorLog, fmt.Sprintln(args...)) } func (g *loggerT) Errorf(format string, args ...interface{}) { - g.m[errorLog].Printf(format, args...) + g.output(errorLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Fatal(args ...interface{}) { - g.m[fatalLog].Fatal(args...) 
- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) } func (g *loggerT) Fatalln(args ...interface{}) { - g.m[fatalLog].Fatalln(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) } func (g *loggerT) Fatalf(format string, args ...interface{}) { - g.m[fatalLog].Fatalf(format, args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) } func (g *loggerT) V(l int) bool { @@ -204,18 +241,18 @@ func (g *loggerT) V(l int) bool { // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/idle.go new file mode 100644 index 000000000..dc3dc72f6 --- /dev/null +++ b/vendor/google.golang.org/grpc/idle.go @@ -0,0 +1,287 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type idlenessEnforcer interface { + exitIdleMode() error + enterIdleMode() error +} + +// idlenessManager defines the functionality required to track RPC activity on a +// channel. 
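The loggerv2 changes above replace the per-severity prefixed loggers with a single output path that can emit either plain or JSON lines (selected for the default, environment-built logger via GRPC_GO_LOG_FORMATTER=json), and the Fatal methods now call os.Exit(1) explicitly. Programmatic configuration is unchanged; a small sketch:

```go
package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Route all gRPC logs to stderr with verbosity level 2. The same effect
	// is available through the GRPC_GO_LOG_SEVERITY_LEVEL and
	// GRPC_GO_LOG_VERBOSITY_LEVEL environment variables; the new
	// GRPC_GO_LOG_FORMATTER=json switch applies only to the default logger
	// built from the environment.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 2))
}
```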
+type idlenessManager interface { + onCallBegin() error + onCallEnd() + close() +} + +type noopIdlenessManager struct{} + +func (noopIdlenessManager) onCallBegin() error { return nil } +func (noopIdlenessManager) onCallEnd() {} +func (noopIdlenessManager) close() {} + +// idlenessManagerImpl implements the idlenessManager interface. It uses atomic +// operations to synchronize access to shared state and a mutex to guarantee +// mutual exclusion in a critical section. +type idlenessManagerImpl struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and onCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// newIdlenessManager creates a new idleness manager implementation for the +// given idle timeout. +func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { + if idleTimeout == 0 { + return noopIdlenessManager{} + } + + i := &idlenessManagerImpl{ + enforcer: enforcer, + timeout: int64(idleTimeout), + } + i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) + return i +} + +// resetIdleTimer resets the idle timer to the given duration. This method +// should only be called from the timer callback. +func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if i.timer == nil { + // Only close sets timer to nil. We are done. + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback, which means the timer has + // already fired. + i.timer.Reset(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (i *idlenessManagerImpl) handleIdleTimeout() { + if i.isClosed() { + return + } + + if atomic.LoadInt32(&i.activeCallsCount) > 0 { + i.resetIdleTimer(time.Duration(i.timeout)) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. 
+ if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) + i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) + return + } + + // This CAS operation is extremely likely to succeed given that there has + // been no activity since the last time we were here. Setting the + // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the + // channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity at the top of this method, or one was ongoing from before + // the last time we were here. In both case, reset the timer and return. + i.resetIdleTimer(time.Duration(i.timeout)) + return + } + + // Now that we've set the active calls count to -math.MaxInt32, it's time to + // actually move to idle mode. + if i.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) + i.resetIdleTimer(time.Duration(i.timeout)) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. But before +// that, it performs a last minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. +func (i *idlenessManagerImpl) tryEnterIdleMode() bool { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. + return false + } + if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + // An very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. + return false + } + + // No new RPCs have come in since we last set the active calls count value + // -math.MaxInt32 in the timer callback. And since we have the lock, it is + // safe to enter idle mode now. + if err := i.enforcer.enterIdleMode(); err != nil { + logger.Errorf("Failed to enter idle mode: %v", err) + return false + } + + // Successfully entered idle mode. + i.actuallyIdle = true + return true +} + +// onCallBegin is invoked at the start of every RPC. +func (i *idlenessManagerImpl) onCallBegin() error { + if i.isClosed() { + return nil + } + + if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. + if err := i.exitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. 
+ atomic.AddInt32(&i.activeCallsCount, -1) + return err + } + + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + return nil +} + +// exitIdleMode instructs the channel to exit idle mode. +// +// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. +func (i *idlenessManagerImpl) exitIdleMode() error { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if !i.actuallyIdle { + // This can happen in two scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC + // came in and onCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same + // time, all of them notice a negative calls count in onCallBegin and get + // here. The first one to get the lock would got the channel to exit idle. + // + // Either way, nothing to do here. + return nil + } + + if err := i.enforcer.exitIdleMode(); err != nil { + return fmt.Errorf("channel failed to exit idle mode: %v", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. + atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) + i.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. + i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) + return nil +} + +// onCallEnd is invoked at the end of every RPC. +func (i *idlenessManagerImpl) onCallEnd() { + if i.isClosed() { + return + } + + // Record the time at which the most recent call finished. + atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. + atomic.AddInt32(&i.activeCallsCount, -1) +} + +func (i *idlenessManagerImpl) isClosed() bool { + return atomic.LoadInt32(&i.closed) == 1 +} + +func (i *idlenessManagerImpl) close() { + atomic.StoreInt32(&i.closed, 1) + + i.idleMu.Lock() + i.timer.Stop() + i.timer = nil + i.idleMu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh deleted file mode 100644 index 15ff9facd..000000000 --- a/vendor/google.golang.org/grpc/install_gae.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -TMP=$(mktemp -d /tmp/sdk.XXX) \ -&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ -&& unzip -q $TMP.zip -d $TMP \ -&& export PATH="$PATH:$TMP/go_appengine" \ No newline at end of file diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 668e0adcf..bb96ef57b 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -72,9 +72,12 @@ type UnaryServerInfo struct { } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. 
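The new idle.go manager above is driven entirely by the WithIdleTimeout dial option added in dialoptions.go: RPC activity is tracked through onCallBegin/onCallEnd, and the enforcer is the ClientConn itself. A hedged usage sketch (target is a placeholder):

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Enter idle mode (shutting down the resolver and balancer) after five
	// minutes without RPC activity; the channel exits idle on the next RPC
	// or an explicit Connect(). A zero value keeps the feature disabled.
	conn, err := grpc.Dial(
		"localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(5*time.Minute),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```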
Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 000000000..08666f62a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,384 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. 
+func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. 
+ balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + gsb.mu.Lock() + defer gsb.mu.Unlock() + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + return + } + balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. 
bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held. Does not panic with a nil receiver. +func (bw *balancerWrapper) Close() { + // before Close is called. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + bw.gsb.cc.RemoveSubConn(sc) + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. + bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policies perspective. + // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + bw.gsb.cc.RemoveSubConn(sc) + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. 
+ if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 5cc3aeddb..755fdebc1 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -28,38 +28,48 @@ import ( "google.golang.org/grpc/internal/grpcutil" ) -// Logger is the global binary logger. It can be used to get binary logger for -// each method. +var grpclogLogger = grpclog.Component("binarylog") + +// Logger specifies MethodLoggers for method names with a Log call that +// takes a context. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of this should be // built at init time from the configuration (environment variable or flags). // -// It is used to get a methodLogger for each individual method. +// It is used to get a MethodLogger for each individual method. var binLogger Logger -var grpclogLogger = grpclog.Component("binarylog") - -// SetLogger sets the binarg logger. +// SetLogger sets the binary logger. // // Only call this at init time. func SetLogger(l Logger) { binLogger = l } -// GetMethodLogger returns the methodLogger for the given methodName. +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + +// GetMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -68,17 +78,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. 
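gracefulswitch is internal to grpc-go, so the sketch below only illustrates the call pattern the package documents: build the wrapper once around the parent balancer.ClientConn, SwitchTo a new policy when the config changes, and forward resolver updates; the old policy keeps serving picks until the pending one reports a usable state. Names and the "round_robin" policy are illustrative:

```go
package gswexample

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/internal/balancer/gracefulswitch"
)

// switchPolicy sketches how the graceful switch wrapper is driven by its owner.
func switchPolicy(cc balancer.ClientConn, state balancer.ClientConnState, policyName string) error {
	gsb := gracefulswitch.NewBalancer(cc, balancer.BuildOptions{})
	defer gsb.Close()

	builder := balancer.Get(policyName) // e.g. "round_robin"; nil if unregistered
	if builder == nil {
		return balancer.ErrBadResolverState
	}
	if err := gsb.SwitchTo(builder); err != nil {
		return err
	}
	// Resolver updates are forwarded to the most recently created child.
	return gsb.UpdateClientConnState(state)
}
```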
+type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. The map fields need to be filled in @@ -88,83 +110,83 @@ func newEmptyLogger() *logger { } // Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // -// New methodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +// New MethodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // -// New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +// New MethodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } -// getMethodLogger returns the methodLogger for the given methodName. +// getMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". 
// -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index d8f4e7602..f9e80e27a 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -30,15 +30,15 @@ import ( // to build a new logger and assign it to binarylog.Logger. // // Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. -// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. -// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. // // If two configs exist for one certain method or service, the one specified // later overrides the previous config. @@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger { return l } -// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds // it to the right map in the logger. func (l *logger) fillMethodLoggerWithConfigString(config string) error { // "" is invalid. 
@@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0cdb41831..6c3f63221 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -19,6 +19,7 @@ package binarylog import ( + "context" "net" "strings" "sync/atomic" @@ -26,7 +27,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -48,7 +49,16 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. +type MethodLogger interface { + Log(context.Context, LogEntryConfig) +} + +// TruncatingMethodLogger is a method logger that truncates headers and messages +// based on configured fields. +type TruncatingMethodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -57,8 +67,12 @@ type MethodLogger struct { sink Sink // TODO(blog): make this plugable. } -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +// NewTruncatingMethodLogger returns a new truncating method logger. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. +func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { + return &TruncatingMethodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -69,8 +83,10 @@ func newMethodLogger(h, m uint64) *MethodLogger { } } -// Log creates a proto binary log entry, and logs it to the sink. -func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal only method for building the proto message out of the +// input event. It's made public to enable other library to reuse as much logic +// in TruncatingMethodLogger as possible. 
+func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -78,18 +94,22 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { m.SequenceIdWithinCall = ml.idWithinCallGen.next() switch pay := m.Payload.(type) { - case *pb.GrpcLogEntry_ClientHeader: + case *binlogpb.GrpcLogEntry_ClientHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) - case *pb.GrpcLogEntry_ServerHeader: + case *binlogpb.GrpcLogEntry_ServerHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) - case *pb.GrpcLogEntry_Message: + case *binlogpb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -108,7 +128,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { // but not counted towards the size limit. continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } @@ -119,7 +139,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } @@ -131,8 +151,11 @@ func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { } // LogEntryConfig represents the configuration for binary log entry. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type LogEntryConfig interface { - toProto() *pb.GrpcLogEntry + toProto() *binlogpb.GrpcLogEntry } // ClientHeader configs the binary log entry to be a ClientHeader entry. @@ -146,10 +169,10 @@ type ClientHeader struct { PeerAddr net.Addr } -func (c *ClientHeader) toProto() *pb.GrpcLogEntry { +func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { // This function doesn't need to set all the fields (e.g. seq ID). The Log // function will set the fields when necessary. 
- clientHeader := &pb.ClientHeader{ + clientHeader := &binlogpb.ClientHeader{ Metadata: mdToMetadataProto(c.Header), MethodName: c.MethodName, Authority: c.Authority, @@ -157,16 +180,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry { if c.Timeout > 0 { clientHeader.Timeout = ptypes.DurationProto(c.Timeout) } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Payload: &pb.GrpcLogEntry_ClientHeader{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ ClientHeader: clientHeader, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -182,19 +205,19 @@ type ServerHeader struct { PeerAddr net.Addr } -func (c *ServerHeader) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ +func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ Metadata: mdToMetadataProto(c.Header), }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -210,7 +233,7 @@ type ClientMessage struct { Message interface{} } -func (c *ClientMessage) toProto() *pb.GrpcLogEntry { +func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -225,19 +248,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -250,7 +273,7 @@ type ServerMessage struct { Message interface{} } -func (c *ServerMessage) toProto() *pb.GrpcLogEntry { +func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -265,19 +288,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } 
else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -287,15 +310,15 @@ type ClientHalfClose struct { OnClientSide bool } -func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, +func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -311,7 +334,7 @@ type ServerTrailer struct { PeerAddr net.Addr } -func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { +func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { grpclogLogger.Info("binarylogging: error in trailer is not a status error") @@ -327,10 +350,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ Metadata: mdToMetadataProto(c.Trailer), StatusCode: uint32(st.Code()), StatusMessage: st.Message(), @@ -339,9 +362,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -354,15 +377,15 @@ type Cancel struct { OnClientSide bool } -func (c *Cancel) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, +func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, Payload: nil, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -379,15 +402,15 @@ func metadataKeyOmit(key string) bool { return strings.HasPrefix(key, "grpc-") } -func mdToMetadataProto(md metadata.MD) *pb.Metadata { - ret := &pb.Metadata{} +func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { + ret := &binlogpb.Metadata{} for k, vv := range md { if metadataKeyOmit(k) { continue } for _, v := range vv { ret.Entry = append(ret.Entry, - &pb.MetadataEntry{ + &binlogpb.MetadataEntry{ Key: k, Value: []byte(v), }, @@ -397,26 +420,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata { return ret } -func addrToProto(addr net.Addr) *pb.Address { - ret := &pb.Address{} +func addrToProto(addr net.Addr) *binlogpb.Address { + ret := &binlogpb.Address{} switch a := addr.(type) { case *net.TCPAddr: if a.IP.To4() != nil { - ret.Type = pb.Address_TYPE_IPV4 + ret.Type = binlogpb.Address_TYPE_IPV4 } else if a.IP.To16() != nil { - ret.Type = pb.Address_TYPE_IPV6 + ret.Type = binlogpb.Address_TYPE_IPV6 } else { - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = 
binlogpb.Address_TYPE_UNKNOWN // Do not set address and port fields. break } ret.Address = a.IP.String() ret.IpPort = uint32(a.Port) case *net.UnixAddr: - ret.Type = pb.Address_TYPE_UNIX + ret.Type = binlogpb.Address_TYPE_UNIX ret.Address = a.String() default: - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN } return ret } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index c2fdd58b3..264de387c 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/protobuf/proto" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" ) var ( @@ -42,15 +42,15 @@ type Sink interface { // Write will be called to write the log entry into the sink. // // It should be thread-safe so it can be called in parallel. - Write(*pb.GrpcLogEntry) error + Write(*binlogpb.GrpcLogEntry) error // Close will be called when the Sink is replaced by a new Sink. Close() error } type noopSink struct{} -func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } -func (ns *noopSink) Close() error { return nil } +func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } // newWriterSink creates a binary log sink with the given writer. // @@ -66,7 +66,7 @@ type writerSink struct { out io.Writer } -func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { +func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) @@ -96,7 +96,7 @@ type bufferedSink struct { done chan struct{} } -func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { +func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { fs.mu.Lock() defer fs.mu.Unlock() if !fs.flusherStarted { diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 9f6a0c120..81c2f5fd7 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -35,6 +35,7 @@ import "sync" // internal/transport/transport.go for an example of this. type Unbounded struct { c chan interface{} + closed bool mu sync.Mutex backlog []interface{} } @@ -47,16 +48,18 @@ func NewUnbounded() *Unbounded { // Put adds t to the unbounded buffer. func (b *Unbounded) Put(t interface{}) { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) == 0 { select { case b.c <- t: - b.mu.Unlock() return default: } } b.backlog = append(b.backlog, t) - b.mu.Unlock() } // Load sends the earliest buffered data, if any, onto the read channel @@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) { // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -72,7 +79,6 @@ func (b *Unbounded) Load() { default: } } - b.mu.Unlock() } // Get returns a read channel on which values added to the buffer, via Put(), @@ -80,6 +86,20 @@ func (b *Unbounded) Load() { // // Upon reading a value from this channel, users are expected to call Load() to // send the next buffered value onto the channel if there is any. 
+// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed. func (b *Unbounded) Get() <-chan interface{} { return b.c } + +// Close closes the unbounded buffer. +func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } + b.closed = true + close(b.c) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index f73141393..777cbcd79 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,6 +24,8 @@ package channelz import ( + "context" + "errors" "fmt" "sort" "sync" @@ -49,7 +51,8 @@ var ( // TurnOn turns on channelz data collection. func TurnOn() { if !IsOn() { - NewChannelzStorage() + db.set(newChannelMap()) + idGen.reset() atomic.StoreInt32(&curState, 1) } } @@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorage initializes channelz data storage and id generator. +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. // -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. -func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. +func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) idGen.reset() + return func() error { - var err error cm := db.get() if cm == nil { return nil } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. 
+ + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { return nil } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) + <-ticker.C } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err } } @@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { id := idGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid, ref) - } else { - db.get().addChannel(id, cn, false, pid, ref) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. -func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a SubChannel's parent id cannot be 0") - return 0 +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") } id := idGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } + sc := &subChannel{ refName: ref, c: c, sockets: make(map[int64]string), id: id, - pid: pid, + pid: pid.Int(), trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid, ref) - return id + db.get().addSubChannel(id, sc, pid.Int()) + return newIdentifer(RefSubChannel, id, pid), nil } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(s Server, ref string) *Identifier { id := idGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } + svr := &server{ refName: ref, s: s, @@ -244,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 { id: id, } db.get().addServer(id, svr) - return id + return newIdentifer(RefServer, id, nil) } // RegisterListenSocket registers the given listen socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a ListenSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") } id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid, ref) - return id + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } + + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addListenSocket(id, ls, pid.Int()) + return newIdentifer(RefListenSocket, id, pid), nil } // RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent +// with ref as its reference name, and adds it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a NormalSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") } id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid, ref) - return id + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } + + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addNormalSocket(id, ns, pid.Int()) + return newIdentifer(RefNormalSocket, id, pid), nil } -// RemoveEntry removes an entry with unique channelz trakcing id to be id from +// RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. -func RemoveEntry(id int64) { - db.get().removeEntry(id) +// +// If channelz is not turned ON, this function is a no-op. +func RemoveEntry(id *Identifier) { + if !IsOn() { + return + } + db.get().removeEntry(id.Int()) } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. type TraceEventDesc struct { Desc string Severity Severity Parent *TraceEventDesc } -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { - for d := desc; d != nil; d = d.Parent { - switch d.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, d.Desc) - case CtWarning: - l.WarningDepth(depth+1, d.Desc) - case CtError: - l.ErrorDepth(depth+1, d.Desc) - } +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { + // Log only the trace description associated with the bottom most entity. + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, withParens(id)+desc.Desc) + case CtWarning: + l.WarningDepth(depth+1, withParens(id)+desc.Desc) + case CtError: + l.ErrorDepth(depth+1, withParens(id)+desc.Desc) } + if getMaxTraceEntry() == 0 { return } - db.get().traceEvent(id, desc) + if IsOn() { + db.get().traceEvent(id.Int(), desc) + } } // channelMap is the storage data structure for channelz. 
@@ -326,6 +367,17 @@ type channelMap struct { normalSockets map[int64]*normalSocket } +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + } +} + func (c *channelMap) addServer(id int64, s *server) { c.mu.Lock() s.cm = c @@ -333,7 +385,7 @@ func (c *channelMap) addServer(id int64, s *server) { c.mu.Unlock() } -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { c.mu.Lock() cn.cm = c cn.trace.cm = c @@ -346,7 +398,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in c.mu.Unlock() } -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { c.mu.Lock() sc.cm = c sc.trace.cm = c @@ -355,7 +407,7 @@ func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref stri c.mu.Unlock() } -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { c.mu.Lock() ls.cm = c c.listenSockets[id] = ls @@ -363,7 +415,7 @@ func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref c.mu.Unlock() } -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { c.mu.Lock() ns.cm = c c.normalSockets[id] = ns @@ -630,7 +682,7 @@ func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) if count == 0 { end = true } - var s []*SocketMetric + s := make([]*SocketMetric, 0, len(sks)) for _, ns := range sks { sm := &SocketMetric{} sm.SocketData = ns.s.ChannelzMetric() diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 000000000..c9a27acd3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. 
+// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index b0013f9c8..8e13a3d2c 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) - } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) 
- if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 3c595d154..7b2f350e2 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the channel itself from the channelz database. // The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. func (c *channel) deleteSelfIfReady() { if !c.deleteSelfFromTree() { return @@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the subchannel itself from the channelz database. // The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. func (sc *subChannel) deleteSelfIfReady() { if !sc.deleteSelfFromTree() { return @@ -686,12 +686,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. 
- RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket ) +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go index 692dd6181..1b1c4cce3 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go index 19c2fc521..8b06eed1a 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * @@ -37,6 +38,6 @@ type SocketOptionData struct { // Windows OS doesn't support Socket Option func (s *SocketOptionData) Getsockopt(fd uintptr) { once.Do(func() { - logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.") + logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index fdf409d55..8d194e44e 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -1,5 +1,3 @@ -// +build linux,!appengine - /* * * Copyright 2018 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 8864a0811..837ddc402 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go index be70b6cdf..25ade6230 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2020 gRPC authors. 
diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go index f499a614c..2919632d6 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go index 55664fa46..f792fd22c 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/util.go +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -18,7 +18,9 @@ package credentials -import "crypto/tls" +import ( + "crypto/tls" +) const alpnProtoStrH2 = "h2" diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 73931a94b..80fd5c7d2 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -21,18 +21,46 @@ package envconfig import ( "os" + "strconv" "strings" ) -const ( - prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" - txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" -) - var ( - // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". - Retry = strings.EqualFold(os.Getenv(retryStr), "on") // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). - TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") + TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // AdvertiseCompressors is set if registered compressor should be advertised + // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). + AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 + // entries but may be overridden by setting the environment variable + // "GRPC_RING_HASH_CAP". This does not override the default bounds + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // PickFirstLBConfig is set if we should support configuration of the + // pick_first LB policy, which can be enabled by setting the environment + // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) ) + +func boolFromEnv(envVar string, def bool) bool { + if def { + // The default is true; return true unless the variable is "false". + return !strings.EqualFold(os.Getenv(envVar), "false") + } + // The default is false; return false unless the variable is "true". + return strings.EqualFold(os.Getenv(envVar), "true") +} + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { + v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) + if err != nil { + return def + } + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 000000000..dd314cfb1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go new file mode 100644 index 000000000..02b4b6a1c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import ( + "os" +) + +const ( + // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. + // Do not use this and read from env directly. Its value is read and kept in + // variable XDSBootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file + // content. Do not use this and read from env directly. Its value is read + // and kept in variable XDSBootstrapFileContent. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" +) + +var ( + // XDSBootstrapFileName holds the name of the file which contains xDS + // bootstrap configuration. Users can specify the location of the bootstrap + // file by setting the environment variable "GRPC_XDS_BOOTSTRAP". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) + // XDSBootstrapFileContent holds the content of the xDS bootstrap + // configuration. 
Users can specify the bootstrap config by setting the + // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) + // XDSRingHash indicates whether ring hash support is enabled, which can be + // disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". + XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) + // XDSClientSideSecurity is used to control processing of security + // configuration on the client-side. + // + // Note that there is no env var protection for the server-side because we + // have a brand new API on the server-side and users explicitly need to use + // the new API to get security integration on the server. + XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) + // XDSAggregateAndDNS indicates whether processing of aggregated cluster and + // DNS cluster is enabled, which can be disabled by setting the environment + // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + // to "false". + XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) + + // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, + // which can be disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". + XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) + // XDSOutlierDetection indicates whether outlier detection support is + // enabled, which can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". + XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) + // XDSFederation indicates whether federation support is enabled, which can + // be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". + XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) + + // XDSRLS indicates whether processing of Cluster Specifier plugins and + // support for the RLS CLuster Specifier is enabled, which can be disabled by + // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to + // "false". + XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) + + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which + // can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". + XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) +) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index e6f975cbf..b68e26a36 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -110,17 +110,17 @@ type LoggerV2 interface { // This is a copy of the DepthLoggerV2 defined in the external grpclog package. // It is defined here to avoid a circular dependency. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { - // InfoDepth logs to INFO log at the specified depth. 
Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 82af70e96..02224b42c 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -63,6 +63,9 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { // Debugf does info logging at verbose level 2. func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. if !Logger.V(2) { return } @@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { return } InfoDepth(1, fmt.Sprintf(format, args...)) + +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (pl *PrefixLogger) V(l int) bool { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + return Logger.V(l) } // NewPrefixLogger creates a prefix logger with the given prefix. diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 740f83c2b..d08e3e907 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -52,6 +52,13 @@ func Intn(n int) int { return r.Intn(n) } +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { + mu.Lock() + defer mu.Unlock() + return r.Int31n(n) +} + // Float64 implements rand.Float64 on the grpcrand global source. func Float64() float64 { mu.Lock() @@ -65,3 +72,17 @@ func Uint64() uint64 { defer mu.Unlock() return r.Uint64() } + +// Uint32 implements rand.Uint32 on the grpcrand global source. +func Uint32() uint32 { + mu.Lock() + defer mu.Unlock() + return r.Uint32() +} + +// Shuffle implements rand.Shuffle on the grpcrand global source. 
+var Shuffle = func(n int, f func(int, int)) { + mu.Lock() + defer mu.Unlock() + r.Shuffle(n, f) +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go new file mode 100644 index 000000000..37b8d4117 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/buffer" +) + +// CallbackSerializer provides a mechanism to schedule callbacks in a +// synchronized manner. It provides a FIFO guarantee on the order of execution +// of scheduled callbacks. New callbacks can be scheduled by invoking the +// Schedule() method. +// +// This type is safe for concurrent access. +type CallbackSerializer struct { + // Done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. + Done chan struct{} + + callbacks *buffer.Unbounded + closedMu sync.Mutex + closed bool +} + +// NewCallbackSerializer returns a new CallbackSerializer instance. The provided +// context will be passed to the scheduled callbacks. Users should cancel the +// provided context to shutdown the CallbackSerializer. It is guaranteed that no +// callbacks will be added once this context is canceled, and any pending un-run +// callbacks will be executed before the serializer is shut down. +func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { + t := &CallbackSerializer{ + Done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } + go t.run(ctx) + return t +} + +// Schedule adds a callback to be scheduled after existing callbacks are run. +// +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. +// +// Return value indicates if the callback was successfully added to the list of +// callbacks to be executed by the serializer. It is not possible to add +// callbacks once the context passed to NewCallbackSerializer is cancelled. +func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + t.closedMu.Lock() + defer t.closedMu.Unlock() + + if t.closed { + return false + } + t.callbacks.Put(f) + return true +} + +func (t *CallbackSerializer) run(ctx context.Context) { + var backlog []func(context.Context) + + defer close(t.Done) + for ctx.Err() == nil { + select { + case <-ctx.Done(): + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. + case callback, ok := <-t.callbacks.Get(): + if !ok { + return + } + t.callbacks.Load() + callback.(func(ctx context.Context))(ctx) + } + } + + // Fetch pending callbacks if any, and execute them before returning from + // this method and closing t.Done. 
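// Illustrative usage sketch of the CallbackSerializer API above (grpcsync is
// internal to grpc-go, so a snippet like this only builds inside the module;
// the values are made up):
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Callbacks may be scheduled from any goroutine; they run one at a time,
	// in FIFO order, on the serializer's own goroutine.
	for i := 0; i < 3; i++ {
		i := i
		cs.Schedule(func(ctx context.Context) { fmt.Println("callback", i) })
	}

	// Cancelling the context stops new callbacks from being accepted; already
	// scheduled callbacks still run before Done is closed.
	cancel()
	<-cs.Done
}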
+ t.closedMu.Lock() + t.closed = true + backlog = t.fetchPendingCallbacks() + t.callbacks.Close() + t.closedMu.Unlock() + for _, b := range backlog { + b(ctx) + } +} + +func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { + var backlog []func(context.Context) + for { + select { + case b := <-t.callbacks.Get(): + backlog = append(backlog, b.(func(context.Context))) + t.callbacks.Load() + default: + return backlog + } + } +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go similarity index 67% rename from vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go rename to vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go index af6f57719..6635f7bca 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go @@ -1,8 +1,6 @@ -// +build appengine - /* * - * Copyright 2020 gRPC authors. + * Copyright 2022 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,14 +16,17 @@ * */ -package credentials +package grpcsync import ( - "crypto/tls" - "net/url" + "sync" ) -// SPIFFEIDFromState is a no-op for appengine builds. -func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { - return nil +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } } diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 000000000..9f4090967 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. 
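// Illustrative sketch of the OnceFunc helper above (values made up; the
// package is internal to grpc-go):
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func main() {
	closeFn := grpcsync.OnceFunc(func() { fmt.Println("resources released") })
	closeFn() // prints once
	closeFn() // no-op: the wrapped function runs at most once
}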
+func RegisteredCompressors() string { + if !envconfig.AdvertiseCompressors { + return "" + } + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go similarity index 72% rename from vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go rename to vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go index a6144cd66..e2f948e8f 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go @@ -1,8 +1,6 @@ -// +build appengine - /* * - * Copyright 2018 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,13 +16,5 @@ * */ -package credentials - -import ( - "net" -) - -// WrapSyscallConn returns newConn on appengine. -func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - return newConn -} +// Package grpcutil provides utility functions used across the gRPC codebase. +package grpcutil diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index 4e7475060..ec62b4775 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -25,7 +25,6 @@ import ( // ParseMethod splits service and method from the input. It expects format // "/service/method". -// func ParseMethod(methodName string) (service, method string, _ error) { if !strings.HasPrefix(methodName, "/") { return "", "", errors.New("invalid method name: should start with /") @@ -39,6 +38,11 @@ func ParseMethod(methodName string) (service, method string, _ error) { return methodName[:pos], methodName[pos+1:], nil } +// baseContentType is the base content-type for gRPC. This is a valid +// content-type on it's own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. const baseContentType = "application/grpc" // ContentSubtype returns the content-subtype for the given content-type. The diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go similarity index 63% rename from vendor/google.golang.org/grpc/internal/resolver/dns/go113.go rename to vendor/google.golang.org/grpc/internal/grpcutil/regex.go index 8783a8cf8..7a092b2b8 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -1,8 +1,6 @@ -// +build go1.13 - /* * - * Copyright 2019 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,16 +16,16 @@ * */ -package dns +package grpcutil -import "net" +import "regexp" -func init() { - filterError = func(err error) error { - if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { - // The name does not exist; not an error. - return nil - } - return err +// FullMatchWithRegex returns whether the full text matches the regex provided. 
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool { + if len(text) == 0 { + return re.MatchString(text) } + re.Longest() + rem := re.FindString(text) + return len(rem) == len(text) } diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go deleted file mode 100644 index 8833021da..000000000 --- a/vendor/google.golang.org/grpc/internal/grpcutil/target.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcutil provides a bunch of utility functions to be used across the -// gRPC codebase. -package grpcutil - -import ( - "strings" - - "google.golang.org/grpc/resolver" -) - -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false - } - return spl[0], spl[1], true -} - -// ParseTarget splits target into a resolver.Target struct containing scheme, -// authority and endpoint. skipUnixColonParsing indicates that the parse should -// not parse "unix:[path]" cases. This should be true in cases where a custom -// dialer is present, to prevent a behavior change. -// -// If target is not a valid scheme://authority/endpoint as specified in -// https://github.com/grpc/grpc/blob/master/doc/naming.md, -// it returns {Endpoint: target}. -func ParseTarget(target string, skipUnixColonParsing bool) (ret resolver.Target) { - var ok bool - if strings.HasPrefix(target, "unix-abstract:") { - if strings.HasPrefix(target, "unix-abstract://") { - // Maybe, with Authority specified, try to parse it - var remain string - ret.Scheme, remain, _ = split2(target, "://") - ret.Authority, ret.Endpoint, ok = split2(remain, "/") - if !ok { - // No Authority, add the "//" back - ret.Endpoint = "//" + remain - } else { - // Found Authority, add the "/" back - ret.Endpoint = "/" + ret.Endpoint - } - } else { - // Without Authority specified, split target on ":" - ret.Scheme, ret.Endpoint, _ = split2(target, ":") - } - return ret - } - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - if strings.HasPrefix(target, "unix:") && !skipUnixColonParsing { - // Handle the "unix:[local/path]" and "unix:[/absolute/path]" cases, - // because splitting on :// only handles the - // "unix://[/absolute/path]" case. Only handle if the dialer is nil, - // to avoid a behavior change with custom dialers. - return resolver.Target{Scheme: "unix", Endpoint: target[len("unix:"):]} - } - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - if ret.Scheme == "unix" { - // Add the "/" back in the unix case, so the unix resolver receives the - // actual endpoint in the "unix://[/absolute/path]" case. 
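// Illustrative behavior of FullMatchWithRegex above: it is equivalent to
// anchoring the pattern, without callers having to rewrite it as "^(...)$".
// The pattern and inputs below are made up.
package main

import (
	"fmt"
	"regexp"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	re := regexp.MustCompile("ab+")
	fmt.Println(grpcutil.FullMatchWithRegex(re, "abbb"))  // true: the whole input is consumed
	fmt.Println(grpcutil.FullMatchWithRegex(re, "abbbc")) // false: the trailing "c" is left over
	fmt.Println(re.MatchString("abbbc"))                  // true: MatchString alone allows partial matches
}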
- ret.Endpoint = "/" + ret.Endpoint - } - return ret -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 1b596bf35..42ff39c84 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -38,11 +38,10 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // ParseServiceConfigForTesting is for creating a fake - // ClientConn for resolver testing only - ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and - // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the // there's difference in white space. EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool @@ -59,11 +58,112 @@ var ( // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + // CanonicalString returns the canonical string of the code defined here: + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + CanonicalString interface{} // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. An // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. DrainServerTransports interface{} // func(*grpc.Server, string) + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalServerOptions interface{} // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalDialOptions interface{} // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
+ DisableGlobalDialOptions interface{} // func() grpc.DialOption + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. + JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption + // JoinServerOptions combines the server options passed as arguments into a + // single server option. + JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. + RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() + + // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. + ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -86,3 +186,9 @@ const ( // that supports backend returned by grpclb balancer. 
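// Sketch of how these interface{}-typed hooks are consumed (an assumption
// about intended use, not shown in this diff): the grpc package assigns a
// concrete function at init time and callers type-assert before invoking it.
package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/internal"
)

func joinedOptions() grpc.DialOption {
	join, ok := internal.JoinDialOptions.(func(...grpc.DialOption) grpc.DialOption)
	if !ok {
		return grpc.EmptyDialOption{} // hook not wired up; nothing to join
	}
	return join(grpc.WithDisableRetry(), grpc.WithUserAgent("example/0.1"))
}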
CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index 302262613..c82e608e0 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -22,6 +22,9 @@ package metadata import ( + "fmt" + "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -30,14 +33,38 @@ type mdKeyType string const mdKey = mdKeyType("grpc.internal.address.metadata") +type mdValue metadata.MD + +func (m mdValue) Equal(o interface{}) bool { + om, ok := o.(mdValue) + if !ok { + return false + } + if len(m) != len(om) { + return false + } + for k, v := range m { + ov := om[k] + if len(ov) != len(v) { + return false + } + for i, ve := range v { + if ov[i] != ve { + return false + } + } + } + return true +} + // Get returns the metadata of addr. func Get(addr resolver.Address) metadata.MD { attrs := addr.Attributes if attrs == nil { return nil } - md, _ := attrs.Value(mdKey).(metadata.MD) - return md + md, _ := attrs.Value(mdKey).(mdValue) + return metadata.MD(md) } // Set sets (overrides) the metadata in addr. @@ -45,6 +72,61 @@ func Get(addr resolver.Address) metadata.MD { // When a SubConn is created with this address, the RPCs sent on it will all // have this metadata. func Set(addr resolver.Address, md metadata.MD) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(mdKey, md) + addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } + +// Validate validates every pair in md with ValidatePair. +func Validate(md metadata.MD) error { + for k, vals := range md { + if err := ValidatePair(k, vals...); err != nil { + return err + } + } + return nil +} + +// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // for i that saving a conversion if not using for range + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} + +// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : +// +// - key must contain one or more characters. +// - the characters in the key must be contained in [0-9 a-z _ - .]. +// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. +// - the characters in the every value must be printable (in [%x20-%x7E]). +func ValidatePair(key string, vals ...string) error { + // key should not be empty + if key == "" { + return fmt.Errorf("there is an empty key in the header") + } + // pseudo-header will be ignored + if key[0] == ':' { + return nil + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(key); i++ { + r := key[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' 
&& r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) + } + } + if strings.HasSuffix(key, "-bin") { + return nil + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key) + } + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 000000000..0177af4b5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. +package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + protov1 "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. +// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e interface{}) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} + ret, err := mm.MarshalToString(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return ret + case protov2.Message: + mm := protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return string(ret) + default: + ret, err := json.MarshalIndent(ee, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index 5e7f36703..c7a18a948 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -117,9 +117,12 @@ type ClientInterceptor interface { NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) } -// ServerInterceptor is unimplementable; do not use. 
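// Illustrative inputs for the header validation rules added in
// internal/metadata above (all keys and values here are made up):
package main

import (
	"fmt"

	imetadata "google.golang.org/grpc/internal/metadata"
)

func main() {
	fmt.Println(imetadata.ValidatePair("user-agent", "grpc-go/1.56")) // <nil>: lowercase key, printable value
	fmt.Println(imetadata.ValidatePair(":authority", "\x01"))         // <nil>: pseudo-headers are skipped
	fmt.Println(imetadata.ValidatePair("trace-bin", "\x00\x01"))      // <nil>: "-bin" values are not checked
	fmt.Println(imetadata.ValidatePair("Bad-Key", "x"))               // error: uppercase is outside [0-9a-z-_.]
	fmt.Println(imetadata.ValidatePair("key", "\x7f"))                // error: value has a non-printable byte
}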
+// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side. type ServerInterceptor interface { - notDefined() + // AllowRPC checks if an incoming RPC is allowed to proceed based on + // information about connection RPC was received on, and HTTP Headers. This + // information will be piped into context. + AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. } type csKeyType string @@ -129,7 +132,7 @@ const csKey = csKeyType("grpc.internal.resolver.configSelector") // SetConfigSelector sets the config selector in state and returns the new // state. func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { - state.Attributes = state.Attributes.WithValues(csKey, cs) + state.Attributes = state.Attributes.WithValue(csKey, cs) return state } diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 03825bbe7..09a667f33 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -116,7 +116,7 @@ type dnsBuilder struct{} // Build creates and starts a DNS resolver that watches the name resolution of the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - host, port, err := parseTarget(target.Endpoint, defaultPort) + host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { return nil, err } @@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.Authority == "" { + if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.Authority) + d.resolver, err = customAuthorityResolver(target.URL.Host) if err != nil { return nil, err } @@ -277,18 +277,13 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { return newAddrs, nil } -var filterError = func(err error) error { +func handleDNSError(err error, lookupType string) error { if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). 
return nil } - return err -} - -func handleDNSError(err error, lookupType string) error { - err = filterError(err) if err != nil { err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) logger.Info(err) @@ -323,12 +318,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { } func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { - var newAddrs []resolver.Address addrs, err := d.resolver.LookupHost(d.ctx, d.host) if err != nil { err = handleDNSError(err, "A") return nil, err } + newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { ip, ok := formatIP(a) if !ok { diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index 520d9229e..afac56572 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -20,13 +20,20 @@ // name without scheme back to gRPC as resolved address. package passthrough -import "google.golang.org/grpc/resolver" +import ( + "errors" + + "google.golang.org/grpc/resolver" +) const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint() == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } r := &passthroughResolver{ target: target, cc: cc, @@ -45,7 +52,7 @@ type passthroughResolver struct { } func (r *passthroughResolver) start() { - r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 0d5a811dd..160911687 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -34,13 +34,24 @@ type builder struct { } func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - if target.Authority != "" { - return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) } - addr := resolver.Address{Addr: target.Endpoint} + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. + endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} if b.scheme == unixAbstractScheme { - // prepend "\x00" to address for unix-abstract - addr.Addr = "\x00" + addr.Addr + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. 
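// Why the unix resolver above reads URL.Path and falls back to URL.Opaque
// (illustrative targets; this shows net/url behavior, not code from the diff):
package main

import (
	"fmt"
	"net/url"
)

func main() {
	abs, _ := url.Parse("unix:///tmp/grpc.sock")
	fmt.Printf("path=%q opaque=%q\n", abs.Path, abs.Opaque) // path="/tmp/grpc.sock" opaque=""

	rel, _ := url.Parse("unix:tmp/grpc.sock")
	fmt.Printf("path=%q opaque=%q\n", rel.Path, rel.Opaque) // path="" opaque="tmp/grpc.sock"
}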
+ addr.Addr = "@" + addr.Addr } cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) return &nopResolver{}, nil diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go new file mode 100644 index 000000000..11d82afcc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. 
+ const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. + const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index c0634d152..51e733e49 100644 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { // ServiceConfig contains a list of loadBalancingConfigs, each with a name and // config. This method iterates through that list in order, and stops at the // first policy that is supported. -// - If the config for the first supported policy is invalid, the whole service -// config is invalid. -// - If the list doesn't contain any supported policy, the whole service config -// is invalid. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { var ir intermediateBalancerConfig err := json.Unmarshal(b, &ir) @@ -78,6 +78,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { return err } + var names []string for i, lbcfg := range ir { if len(lbcfg) != 1 { return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) @@ -92,6 +93,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { for name, jsonCfg = range lbcfg { } + names = append(names, name) builder := balancer.Get(name) if builder == nil { // If the balancer is not registered, move on to the next config. @@ -120,7 +122,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { // return. This means we had a loadBalancingConfig slice but did not // encounter a registered policy. The config is considered invalid in this // case. 
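// Worked example of the Duration JSON codec defined above (durations are
// made up; the package is internal to grpc-go):
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/serviceconfig"
)

func main() {
	d := serviceconfig.Duration(1500 * time.Millisecond)
	b, _ := d.MarshalJSON()
	fmt.Println(string(b)) // "1.500s": trailing zero groups are trimmed

	var parsed serviceconfig.Duration
	if err := parsed.UnmarshalJSON([]byte(`"0.200s"`)); err == nil {
		fmt.Println(time.Duration(parsed)) // 200ms
	}
}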
- return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) } // MethodConfig defines the configuration recommended by the service providers for a diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index e5c6513ed..b0ead4f54 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool { } return proto.Equal(e.s.s, tse.s.s) } + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. +func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go index 4b2964f2a..b3a72276d 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index 7913ef1db..999f52cd7 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * @@ -35,41 +36,41 @@ var logger = grpclog.Component("core") func log() { once.Do(func() { - logger.Info("CPU time info is unavailable on non-linux or appengine environment.") + logger.Info("CPU time info is unavailable on non-linux environments.") }) } -// GetCPUTime returns the how much CPU time has passed since the start of this process. -// It always returns 0 under non-linux or appengine environment. +// GetCPUTime returns the how much CPU time has passed since the start of this +// process. It always returns 0 under non-linux environments. func GetCPUTime() int64 { log() return 0 } -// Rusage is an empty struct under non-linux or appengine environment. +// Rusage is an empty struct under non-linux environments. type Rusage struct{} -// GetRusage is a no-op function under non-linux or appengine environment. +// GetRusage is a no-op function under non-linux environments. func GetRusage() *Rusage { log() return nil } // CPUTimeDiff returns the differences of user CPU time and system CPU time used -// between two Rusage structs. It a no-op function for non-linux or appengine environment. +// between two Rusage structs. It a no-op function for non-linux environments. func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { log() return 0, 0 } -// SetTCPUserTimeout is a no-op function under non-linux or appengine environments +// SetTCPUserTimeout is a no-op function under non-linux environments. 
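// Illustrative check of IsRestrictedControlPlaneCode above: it reports the
// status codes that gRFC A54 does not allow the control plane (resolvers,
// balancers) to generate. The codes used here are arbitrary examples.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/internal/status"
)

func main() {
	fmt.Println(status.IsRestrictedControlPlaneCode(status.New(codes.NotFound, "x")))    // true: restricted
	fmt.Println(status.IsRestrictedControlPlaneCode(status.New(codes.Unavailable, "x"))) // false: allowed
}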
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { log() return nil } -// GetTCPUserTimeout is a no-op function under non-linux or appengine environments -// a negative return value indicates the operation is not supported +// GetTCPUserTimeout is a no-op function under non-linux environments. +// A negative return value indicates the operation is not supported func GetTCPUserTimeout(conn net.Conn) (int, error) { log() return -1, nil diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 45532f8ae..be5a9c81e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -22,6 +22,7 @@ import ( "bytes" "errors" "fmt" + "net" "runtime" "strconv" "sync" @@ -29,6 +30,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -133,9 +135,11 @@ type cleanupStream struct { func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM type earlyAbortStream struct { + httpStatus uint32 streamID uint32 contentSubtype string status *status.Status + rst bool } func (*earlyAbortStream) isTransportResponseFrame() bool { return false } @@ -189,7 +193,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure } func (*goAway) isTransportResponseFrame() bool { return false } @@ -207,6 +211,14 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -406,7 +418,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") } } } @@ -476,12 +488,14 @@ type loopyWriter struct { hEnc *hpack.Encoder // HPACK encoder. bdpEst *bdpEstimator draining bool + conn net.Conn + logger *grpclog.PrefixLogger // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -494,6 +508,8 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato hBuf: &buf, hEnc: hpack.NewEncoder(&buf), bdpEst: bdpEst, + conn: conn, + logger: logger, } return l } @@ -511,23 +527,26 @@ const minBatchSize = 1000 // 2. Stream level flow control quota available. // // In each iteration of run loop, other than processing the incoming control -// frame, loopy calls processData, which processes one node from the activeStreams linked-list. -// This results in writing of HTTP2 frames into an underlying write buffer. 
-// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. -// As an optimization, to increase the batch size for each flush, loopy yields the processor, once -// if the batch size is too low to give stream goroutines a chance to fill it up. +// frame, loopy calls processData, which processes one node from the +// activeStreams linked-list. This results in writing of HTTP2 frames into an +// underlying write buffer. When there's no more control frames to read from +// controlBuf, loopy flushes the write buffer. As an optimization, to increase +// the batch size for each flush, loopy yields the processor, once if the batch +// size is too low to give stream goroutines a chance to fill it up. +// +// Upon exiting, if the error causing the exit is not an I/O error, run() +// flushes and closes the underlying connection. Otherwise, the connection is +// left open to allow the I/O error to be encountered by the reader instead. func (l *loopyWriter) run() (err error) { defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. User closed the connection. - // 3. A graceful close of connection. - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter.run returning. %v", err) - } - err = nil + if l.logger.V(logLevel) { + l.logger.Infof("loopyWriter exiting with error: %v", err) } + if !isIOError(err) { + l.framer.writer.Flush() + l.conn.Close() + } + l.cbuf.finish() }() for { it, err := l.cbuf.get(true) @@ -572,7 +591,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -581,11 +599,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) } -func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { // Otherwise update the quota. if w.streamID == 0 { l.sendQuota += w.increment - return nil + return } // Find the stream and update it. 
if str, ok := l.estdStreams[w.streamID]; ok { @@ -593,10 +611,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { str.state = active l.activeStreams.enqueue(str) - return nil + return } } - return nil } func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { @@ -604,13 +621,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { } func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { - if err := l.applySettings(s.ss); err != nil { - return err - } + l.applySettings(s.ss) return l.framer.fr.WriteSettingsAck() } -func (l *loopyWriter) registerStreamHandler(h *registerStream) error { +func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ id: h.streamID, state: empty, @@ -618,15 +633,14 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error { wq: h.wq, } l.estdStreams[h.streamID] = str - return nil } func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + if l.logger.V(logLevel) { + l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) } return nil } @@ -653,19 +667,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { itl: &itemList{}, wq: h.wq, } - str.itl.enqueue(h) - return l.originateStream(str) + return l.originateStream(str, h) } -func (l *loopyWriter) originateStream(str *outStream) error { - hdr := str.itl.dequeue().(*headerFrame) - if err := hdr.initStream(str.id); err != nil { - if err == ErrConnClosing { - return err - } - // Other errors(errStreamDrain) need not close transport. +func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { + // l.draining is set when handling GoAway. In which case, we want to avoid + // creating new streams. + if l.draining { + // TODO: provide a better error with the reason we are in draining. + hdr.onOrphaned(errStreamDrain) return nil } + if err := hdr.initStream(str.id); err != nil { + return err + } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err } @@ -680,8 +695,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - if logger.V(logLevel) { - logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + if l.logger.V(logLevel) { + l.logger.Warningf("Encountered error while encoding headers: %v", err) } } } @@ -719,10 +734,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He return nil } -func (l *loopyWriter) preprocessData(df *dataFrame) error { +func (l *loopyWriter) preprocessData(df *dataFrame) { str, ok := l.estdStreams[df.streamID] if !ok { - return nil + return } // If we got data for a stream it means that // stream was originated and the headers were sent out. 
@@ -731,7 +746,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error { str.state = active l.activeStreams.enqueue(str) } - return nil } func (l *loopyWriter) pingHandler(p *ping) error { @@ -742,9 +756,8 @@ func (l *loopyWriter) pingHandler(p *ping) error { } -func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { o.resp <- l.sendQuota - return nil } func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { @@ -761,8 +774,9 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { return err } } - if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + if l.draining && len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. + return errors.New("finished processing active streams while in draining mode") } return nil } @@ -771,9 +785,12 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if l.side == clientSide { return errors.New("earlyAbortStream not handled on client") } - + // In case the caller forgets to set the http status, default to 200. + if eas.httpStatus == 0 { + eas.httpStatus = 200 + } headerFields := []hpack.HeaderField{ - {Name: ":status", Value: "200"}, + {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, @@ -782,6 +799,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { return err } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } return nil } @@ -789,7 +811,8 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + // Flush and close the connection; we are done with it. + return errors.New("received GOAWAY with no active streams") } } return nil @@ -810,7 +833,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: - return l.incomingWindowUpdateHandler(i) + l.incomingWindowUpdateHandler(i) case *outgoingWindowUpdate: return l.outgoingWindowUpdateHandler(i) case *incomingSettings: @@ -820,7 +843,7 @@ func (l *loopyWriter) handle(i interface{}) error { case *headerFrame: return l.headerHandler(i) case *registerStream: - return l.registerStreamHandler(i) + l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) case *earlyAbortStream: @@ -828,19 +851,24 @@ func (l *loopyWriter) handle(i interface{}) error { case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: - return l.preprocessData(i) + l.preprocessData(i) case *ping: return l.pingHandler(i) case *goAway: return l.goAwayHandler(i) case *outFlowControlSizeRequest: - return l.outFlowControlSizeRequestHandler(i) + l.outFlowControlSizeRequestHandler(i) + case closeConnection: + // Just return a non-I/O error and run() will flush and close the + // connection. 
+ return ErrConnClosing default: return fmt.Errorf("transport: unknown control message type %T", i) } + return nil } -func (l *loopyWriter) applySettings(ss []http2.Setting) error { +func (l *loopyWriter) applySettings(ss []http2.Setting) { for _, s := range ss { switch s.ID { case http2.SettingInitialWindowSize: @@ -859,7 +887,6 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error { updateHeaderTblSize(l.hEnc, s.Val) } } - return nil } // processData removes the first stream from active streams, writes out at most 16KB @@ -876,9 +903,9 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possilbe HTTP2 frame size. + // maximum possible HTTP2 frame size. if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame // Client sends out empty data frame with endStream = true @@ -893,7 +920,7 @@ func (l *loopyWriter) processData() (bool, error) { return false, err } if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { - return false, nil + return false, err } } else { l.activeStreams.enqueue(str) diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go index 9fa306b2e..bc8ee0747 100644 --- a/vendor/google.golang.org/grpc/internal/transport/defaults.go +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -47,3 +47,9 @@ const ( defaultClientMaxHeaderListSize = uint32(16 << 20) defaultServerMaxHeaderListSize = uint32(16 << 20) ) + +// MaxStreamID is the upper bound for the stream ID before the current +// transport gracefully closes and new transport is created for subsequent RPCs. +// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit +// integer. It's exported so that tests can override it. +var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index f262edd8e..97198c515 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -136,12 +136,10 @@ type inFlow struct { // newLimit updates the inflow window to a new value n. // It assumes that n is always greater than the old limit. 
-func (f *inFlow) newLimit(n uint32) uint32 { +func (f *inFlow) newLimit(n uint32) { f.mu.Lock() - d := n - f.limit f.limit = n f.mu.Unlock() - return d } func (f *inFlow) maybeAdjust(n uint32) uint32 { diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 1c3459c2b..98f80e3fa 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -39,6 +39,7 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -46,24 +47,32 @@ import ( "google.golang.org/grpc/status" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? 
we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusUnsupportedMediaType) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } st := &serverHandlerTransport{ @@ -75,11 +84,14 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta contentSubtype: contentSubtype, stats: stats, } + st.logger = prefixLoggerForServerHandlerTransport(st) if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed grpc-timeout: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -97,7 +109,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } @@ -138,15 +152,19 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats stats.Handler + stats []stats.Handler + logger *grpclog.PrefixLogger } -func (ht *serverHandlerTransport) Close() { - ht.closeOnce.Do(ht.closeCloseChanOnce) +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if ht.logger.V(logLevel) { + ht.logger.Infof("Closing: %v", err) + } + close(ht.closedCh) + }) } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a TCP "ip:port" string, or @@ -228,15 +246,15 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) if err == nil { // transport has not been closed - if ht.stats != nil { - // Note: The trailer fields are compressed with hpack after this call returns. - // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } @@ -314,10 +332,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { }) if err == nil { - if ht.stats != nil { + for _, sh := range ht.stats { // Note: The header fields are compressed with hpack after this call returns. // No WireLength field is set here. 
- ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + sh.HandleRPC(s.Context(), &stats.OutHeader{ Header: md.Copy(), Compression: s.sendCompress, }) @@ -346,7 +364,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace case <-ht.req.Context().Done(): } cancel() - ht.Close() + ht.Close(errors.New("request is done processing")) }() req := ht.req @@ -369,14 +387,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) - if ht.stats != nil { - s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range ht.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: ht.RemoteAddr(), Compression: s.recvCompress, } - ht.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, @@ -435,17 +453,17 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain() { +func (ht *serverHandlerTransport) Drain(debugData string) { panic("Drain() is not implemented") } // mapRecvMsgError returns the non-nil err into the appropriate // error value as expected by callers of *grpc.parser.recvMsg. // In particular, in can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 0cd6da1e7..326bf0848 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -25,6 +25,7 @@ import ( "math" "net" "net/http" + "path/filepath" "strconv" "strings" "sync" @@ -37,8 +38,11 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" @@ -56,11 +60,15 @@ var clientConnectionCounter uint64 // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + // address contains the resolver returned address for this transport. 
+ // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter @@ -77,6 +85,7 @@ type http2Client struct { framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. + // Do not access controlBuf with mu held. controlBuf *controlBuffer fc *trInFlow // The scheme used: https if TLS is on, http otherwise. @@ -89,7 +98,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandler stats.Handler + statsHandlers []stats.Handler initialWindowSize int32 @@ -97,17 +106,15 @@ type http2Client struct { maxSendHeaderListSize *uint32 bdpEst *bdpEstimator - // onPrefaceReceipt is a callback that client transport calls upon - // receiving server preface to signal that a succefull HTTP2 - // connection was established. - onPrefaceReceipt func() maxConcurrentStreams uint32 streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 nextID uint32 + registeredCompressors string + // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables state transportState activeStreams map[uint32]*Stream @@ -131,28 +138,35 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData - onGoAway func(GoAwayReason) - onClose func() + onClose func(GoAwayReason) bufferPool *bufferPool connectionID uint64 + logger *grpclog.PrefixLogger } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { address := addr.Addr networkType, ok := networktype.Get(addr) if fn != nil { + // Special handling for unix scheme with custom dialer. Back in the day, + // we did not have a unix resolver and therefore targets with a unix + // scheme would end up using the passthrough resolver. So, user's used a + // custom dialer in this case and expected the original dial target to + // be passed to the custom dialer. Now, we have a unix resolver. But if + // a custom dialer is specified, we want to retain the old behavior in + // terms of the address being passed to the custom dialer. if networkType == "unix" && !strings.HasPrefix(address, "\x00") { - // For backward compatibility, if the user dialed "unix:///path", - // the passthrough resolver would be used and the user's custom - // dialer would see "unix:///path". Since the unix resolver is used - // and the address is now "/path", prepend "unix://" so the user's - // custom dialer sees the same address. - return fn(ctx, "unix://"+address) + // Supported unix targets are either "unix://absolute-path" or + // "unix:relative-path". + if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) } return fn(ctx, address) } @@ -184,7 +198,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. 
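The dial() hunk above changes how the target string handed to a custom dialer is formed for unix-scheme addresses. The sketch below is a minimal illustration of that decision rule, not the vendored code: abstract sockets (leading NUL byte) pass through untouched, absolute paths become "unix://<path>", and relative paths become "unix:<path>". All names are local to the sketch, and in the real code the rule only applies when the resolved network type is unix and a custom dialer is configured.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// dialTargetForUnix mirrors the address-rewriting rule in the patched dial().
func dialTargetForUnix(address string) string {
	if strings.HasPrefix(address, "\x00") {
		return address // abstract unix socket: hand it to the dialer as-is
	}
	if filepath.IsAbs(address) {
		return "unix://" + address
	}
	return "unix:" + address
}

func main() {
	for _, addr := range []string{"/tmp/grpc.sock", "relative.sock", "\x00abstract"} {
		fmt.Printf("%q -> %q\n", addr, dialTargetForUnix(addr))
	}
}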
-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -193,19 +207,51 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }() + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the dialer and credential handshaker. This makes it possible for + // address specific arbitrary data to reach custom dialers and credential handshakers. + connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) if err != nil { if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) } + // Any further errors will close the underlying connection defer func(conn net.Conn) { if err != nil { conn.Close() } }(conn) + + // The following defer and goroutine monitor the connectCtx for cancelation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if err := connectCtx.Err(); err != nil { + // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("Aborting due to connect deadline expiring: %v", err) + } + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams // Validate keepalive parameters. if kp.Time == 0 { @@ -237,20 +283,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - // gRPC, resolver, balancer etc. can specify arbitrary data in the - // Attributes field of resolver.Address, which is shoved into connectCtx - // and passed to the credential handshaker. This makes it possible for - // address specific arbitrary data to reach the credential handshaker. - connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) - rawConn := conn - // Pull the deadline from the connectCtx, which will be used for - // timeouts in the authentication protocol handshake. Can ignore the - // boolean as the deadline will return the zero value, which will make - // the conn not timeout on I/O operations. 
- deadline, _ := connectCtx.Deadline() - rawConn.SetDeadline(deadline) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) - rawConn.SetDeadline(time.Time{}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } @@ -288,6 +321,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ctxDone: ctx.Done(), // Cache Done chan. cancel: cancel, userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -302,19 +337,20 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandler: opts.StatsHandler, + statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, - onPrefaceReceipt: onPrefaceReceipt, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), - onGoAway: onGoAway, - onClose: onClose, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), + onClose: onClose, } + t.logger = prefixLoggerForClientTransport(t) + // Add peer information to the http2client context. + t.ctx = peer.NewContext(t.ctx, t.getPeer()) if md, ok := addr.Metadata.(*metadata.MD); ok { t.md = *md @@ -332,38 +368,50 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - if t.statsHandler != nil { - t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } - // Start the reader goroutine for incoming message. Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. Then it - // dispatches the frame to the corresponding stream entity. - go t.reader() + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err == nil { + err = <-readerErrCh + } + if err != nil { + t.Close(err) + } + }() // Send connection preface to server. 
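The preceding hunks replace the old SetDeadline handling with a watchdog goroutine that monitors connectCtx and hard-closes the connection if the context expires mid-handshake, while a deferred cancel-and-wait keeps that goroutine from outliving the constructor. Below is a rough, self-contained sketch of that pattern under assumed names (connectWithWatchdog, a net.Pipe connection); it is not the vendored newHTTP2Client.

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func connectWithWatchdog(ctx context.Context, conn net.Conn) error {
	monitorDone := make(chan struct{})
	watchCtx, stopWatch := context.WithCancel(ctx)
	defer func() {
		stopWatch()   // wake the watchdog if ctx has not expired
		<-monitorDone // wait for it, so conn can no longer be closed later
	}()
	go func() {
		defer close(monitorDone)
		<-watchCtx.Done()
		if ctx.Err() != nil {
			// The connect deadline expired first: abort the handshake by
			// closing the connection out from under it.
			conn.Close()
		}
	}()

	// Stand-in for the handshake / preface exchange performed on conn.
	time.Sleep(10 * time.Millisecond)
	return ctx.Err()
}

func main() {
	clientConn, serverConn := net.Pipe()
	defer serverConn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println("connect result:", connectWithWatchdog(ctx, clientConn))
}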
n, err := t.conn.Write(clientPreface) if err != nil { err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) - t.Close(err) return nil, err } if n != len(clientPreface) { err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - t.Close(err) return nil, err } var ss []http2.Setting @@ -383,14 +431,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts err = t.framer.fr.WriteSettings(ss...) if err != nil { err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) - t.Close(err) return nil, err } // Adjust the connection flow control window if needed. if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) - t.Close(err) return nil, err } } @@ -401,17 +447,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) - err := t.loopy.run() - if err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } - } - // Do not close the transport. Let reader goroutine handle it since - // there might be data in the buffers. - t.conn.Close() - t.controlBuf.finish() + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy.run() close(t.writerDone) }() return t, nil @@ -457,7 +494,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ Addr: t.remoteAddr, - AuthInfo: t.authInfo, + AuthInfo: t.authInfo, // Can be nil } } @@ -493,9 +530,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } + registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. + if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. @@ -575,11 +625,15 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. 
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } return nil, err } - return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err) } for k, v := range data { // Capital header names are illegal in HTTP/2. @@ -604,7 +658,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { - return nil, status.Errorf(codes.Internal, "transport: %v", err) + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) } callAuthData = make(map[string]string, len(data)) for k, v := range data { @@ -616,12 +677,21 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } -// NewStreamError wraps an error and reports additional information. +// NewStreamError wraps an error and reports additional information. Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. type NewStreamError struct { Err error - DoNotRetry bool - PerformedIO bool + AllowTransparentRetry bool } func (e NewStreamError) Error() string { @@ -630,25 +700,23 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { - defer func() { - if err != nil { - nse, ok := err.(*NewStreamError) - if !ok { - nse = &NewStreamError{Err: err} - } - if len(t.perRPCCreds) > 0 || callHdr.Creds != nil { - // We may have performed I/O in the per-RPC creds callback, so do not - // allow transparent retry. - nse.PerformedIO = true - } - err = nse - } - }() +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. 
+ if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, err + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -670,17 +738,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea endStream: false, initStream: func(id uint32) error { t.mu.Lock() - if state := t.state; state != reachable { + // TODO: handle transport closure in loopy instead and remove this + // initStream is never called when transport is draining. + if t.state == closing { t.mu.Unlock() - // Do a quick cleanup. - err := error(errStreamDrain) - if state == closing { - err = ErrConnClosing - } - cleanup(err) - return err + cleanup(ErrConnClosing) + return ErrConnClosing } - t.activeStreams[id] = s if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) @@ -697,6 +761,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } firstTry := true var ch chan struct{} + transportDrainRequired := false checkForStreamQuota := func(it interface{}) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { @@ -712,8 +777,20 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea h := it.(*headerFrame) h.streamID = t.nextID t.nextID += 2 + + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. + transportDrainRequired = t.nextID > MaxStreamID + s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.mu.Lock() + if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). + t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. + } + t.activeStreams[s.id] = s + t.mu.Unlock() if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -739,52 +816,56 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } for { success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { - if !checkForStreamQuota(it) { - return false - } - if !checkForHeaderListSize(it) { - return false - } - return true + return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { - return nil, err + // Connection closed. 
+ return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { case <-ch: - case <-s.ctx.Done(): - return nil, ContextErr(s.ctx.Err()) + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, errStreamDrain + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, ErrConnClosing + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if t.statsHandler != nil { + if len(t.statsHandlers) != 0 { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) + } + } + if transportDrainRequired { + if t.logger.V(logLevel) { + t.logger.Infof("Draining transport: t.nextID > MaxStreamID") } - t.statsHandler.HandleRPC(s.ctx, outHeader) + t.GracefulClose() } return s, nil } @@ -867,20 +948,21 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. -// -// This method blocks until the addrConn that initiated this transport is -// re-connected. This happens because t.onClose() begins reconnect logic at the -// addrConn level and blocks until the addrConn is successfully connected. func (t *http2Client) Close(err error) { t.mu.Lock() - // Make sure we only Close once. + // Make sure we only close once. if t.state == closing { t.mu.Unlock() return } - // Call t.onClose before setting the state to closing to prevent the client - // from attempting to create new streams ASAP. - t.onClose() + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. + if t.state != draining { + t.onClose(GoAwayInvalid) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -893,9 +975,7 @@ func (t *http2Client) Close(err error) { t.controlBuf.finish() t.cancel() t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. 
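NewStream above now derives transportDrainRequired from the freshly assigned stream ID and calls GracefulClose once t.nextID exceeds MaxStreamID (defined in defaults.go as 75% of 2^31-1). The toy program below uses stand-in types rather than the real transport to walk through that arithmetic: client stream IDs are odd and advance by 2, and the drain flag flips on the allocation that pushes nextID past the bound.

package main

import (
	"fmt"
	"math"
)

// maxStreamID matches the expression used in defaults.go.
var maxStreamID = uint32(math.MaxInt32 * 3 / 4)

type fakeTransport struct {
	nextID   uint32
	draining bool
}

// allocateStreamID hands out the next odd stream ID and reports whether the
// transport should be drained afterwards (transportDrainRequired in the patch).
func (t *fakeTransport) allocateStreamID() (uint32, bool) {
	id := t.nextID
	t.nextID += 2
	drain := t.nextID > maxStreamID
	if drain {
		t.draining = true
	}
	return id, drain
}

func main() {
	// Pretend the connection is two allocations away from the limit.
	t := &fakeTransport{nextID: maxStreamID - 2}
	for i := 0; i < 2; i++ {
		id, drain := t.allocateStreamID()
		fmt.Printf("allocated stream %d, drain transport: %v\n", id, drain)
	}
}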
_, goAwayDebugMessage := t.GetGoAwayReason() @@ -912,11 +992,11 @@ func (t *http2Client) Close(err error) { for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { connEnd := &stats.ConnEnd{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } @@ -932,11 +1012,15 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if t.logger.V(logLevel) { + t.logger.Infof("GracefulClose called") + } + t.onClose(GoAwayInvalid) t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close(ErrConnClosing) + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) @@ -996,13 +1080,13 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.mu.Unlock() updateIWS := func(interface{}) bool { t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() return true } t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) @@ -1077,7 +1161,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) { } // The server has closed the stream without sending trailers. Record that // the read direction is closed, and set the status appropriately. - if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) } } @@ -1093,8 +1177,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + if t.logger.V(logLevel) { + t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) } statusCode = codes.Unknown } @@ -1176,10 +1260,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.mu.Unlock() return } - if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - if logger.V(logLevel) { - logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") - } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { + // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug + // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is + // enabled by default and double the configure KEEPALIVE_TIME used for new connections + // on that channel. + logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".") } id := f.LastStreamID if id > 0 && id%2 == 0 { @@ -1208,12 +1294,14 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { default: t.setGoAwayReason(f) close(t.goAway) - t.controlBuf.put(&incomingGoAway{}) + defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held. 
// Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. - t.onGoAway(t.goAwayReason) - t.state = draining + if t.state != draining { + t.onClose(t.goAwayReason) + t.state = draining + } } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. @@ -1221,24 +1309,35 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { if upperLimit == 0 { // This is the first GoAway Frame. upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. } + + t.prevGoAwayID = id + if len(t.activeStreams) == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + return + } + + streamsToClose := make([]*Stream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) - t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + if streamID > id && streamID <= upperLimit { + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) + } } } - t.prevGoAwayID = id - active := len(t.activeStreams) t.mu.Unlock() - if active == 0 { - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + // Called outside t.mu because closeStream can take controlBuf's mu, which + // could induce deadlock and is not allowed. + for _, stream := range streamsToClose { + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } } // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. -// It expects a lock on transport's mutext to be held by +// It expects a lock on transport's mutex to be held by // the caller. func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason @@ -1407,26 +1506,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } isHeader := false - defer func() { - if t.statsHandler != nil { - if isHeader { - inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), - Header: s.header.Copy(), - Compression: s.recvCompress, - } - t.statsHandler.HandleRPC(s.ctx, inHeader) - } else { - inTrailer := &stats.InTrailer{ - Client: true, - WireLength: int(frame.Header().Length), - Trailer: s.trailer.Copy(), - } - t.statsHandler.HandleRPC(s.ctx, inTrailer) - } - } - }() // If headerChan hasn't been closed yet if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { @@ -1448,6 +1527,25 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } + for _, sh := range t.statsHandlers { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, + } + sh.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + sh.HandleRPC(s.ctx, inTrailer) + } + } + if !endStream { return } @@ -1461,33 +1559,35 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } -// reader runs as a separate goroutine in charge of reading data from network -// connection. 
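The GOAWAY handling above now collects the affected streams into streamsToClose while t.mu is held and calls closeStream only after unlocking, because closeStream can take the control buffer's mutex and holding both invites a lock-order deadlock. A minimal sketch of that collect-then-act pattern follows, with placeholder types rather than the vendored ones.

package main

import (
	"fmt"
	"sync"
)

type stream struct{ id uint32 }

type transport struct {
	mu      sync.Mutex
	streams map[uint32]*stream
}

// closeStream stands in for a call that must not run with t.mu held.
func (t *transport) closeStream(s *stream) { fmt.Println("closed stream", s.id) }

func (t *transport) handleGoAway(lastStreamID uint32) {
	t.mu.Lock()
	toClose := make([]*stream, 0, len(t.streams))
	for id, s := range t.streams {
		if id > lastStreamID { // unprocessed by the server
			toClose = append(toClose, s)
		}
	}
	t.mu.Unlock()
	// Act outside the lock so closeStream may acquire other mutexes freely.
	for _, s := range toClose {
		t.closeStream(s)
	}
}

func main() {
	t := &transport{streams: map[uint32]*stream{1: {1}, 3: {3}, 5: {5}}}
	t.handleGoAway(3) // only stream 5 arrived after the GOAWAY cutoff
}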
-// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - defer close(t.readerDone) - // Check the validity of server preface. +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { frame, err := t.framer.fr.ReadFrame() if err != nil { - err = connectionErrorf(true, err, "error reading server preface: %v", err) - t.Close(err) // this kicks off resetTransport, so must be last before return - return - } - t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) - if t.keepaliveEnabled { - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + return connectionErrorf(true, err, "error reading server preface: %v", err) } sf, ok := frame.(*http2.SettingsFrame) if !ok { - // this kicks off resetTransport, so must be last before return - t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) - return + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) } - t.onPrefaceReceipt() t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } // loop to keep reading incoming messages on this transport. for { @@ -1553,7 +1653,7 @@ func minTime(a, b time.Duration) time.Duration { return b } -// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. @@ -1690,3 +1790,9 @@ func (t *http2Client) getOutFlowWindow() int64 { return -2 } } + +func (t *http2Client) stateForTesting() transportState { + t.mu.Lock() + defer t.mu.Unlock() + return t.state +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index e3799d50a..ec4eef213 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,12 +35,16 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -52,10 +56,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. 
- ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -73,7 +77,6 @@ type http2Server struct { writerDone chan struct{} // sync point to enable testing. remoteAddr net.Addr localAddr net.Addr - maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection inTapHandle tap.ServerInHandle framer *framer @@ -83,7 +86,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats stats.Handler + stats []stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. @@ -102,13 +105,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when Drain() is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. @@ -118,21 +121,44 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool connectionID uint64 + + // maxStreamMu guards the maximum stream ID + // This lock may not be taken if mu is already held. + maxStreamMu sync.Mutex + maxStreamID uint32 // max stream ID ever seen + + logger *grpclog.PrefixLogger } // NewServerTransport creates a http2 transport with conn and configuration // options from config. // // It returns a non-nil transport and a nil error on success. On failure, it -// returns a non-nil transport and a nil-error. For a special case where the +// returns a nil transport and a non-nil error. For a special case where the // underlying conn gets closed before the client preface could be read, it // returns a nil transport and a nil error. 
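The server side above swaps the lazily created drainChan for a drainEvent, so repeated Drain() calls stay no-ops and a later hunk can simply fire the event when the goaway ping is acked. The stand-in type below only approximates the semantics such a fire-once event needs; it is not the vendored grpcsync.Event. Fire is idempotent, and Done exposes a channel closed after the first Fire.

package main

import (
	"fmt"
	"sync"
)

type event struct {
	once sync.Once
	c    chan struct{}
}

func newEvent() *event { return &event{c: make(chan struct{})} }

// Fire closes the channel exactly once, no matter how often it is called.
func (e *event) Fire() { e.once.Do(func() { close(e.c) }) }

// Done returns a channel that is closed once Fire has been called.
func (e *event) Done() <-chan struct{} { return e.c }

func main() {
	drain := newEvent()
	drain.Fire()
	drain.Fire() // second call is a harmless no-op
	select {
	case <-drain.Done():
		fmt.Println("draining has started")
	default:
		fmt.Println("not draining")
	}
}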
func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + var authInfo credentials.AuthInfo + rawConn := conn + if config.Credentials != nil { + var err error + conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away + // from gRPC; those connections should be left open. io.EOF means + // the connection was closed before handshaking completed, which can + // happen naturally from probers. Return these errors directly. + if err == credentials.ErrConnDispatched || err == io.EOF { + return nil, err + } + return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + } + } writeBufSize := config.WriteBufferSize readBufSize := config.ReadBufferSize maxHeaderListSize := defaultServerMaxHeaderListSize @@ -145,15 +171,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, ID: http2.SettingMaxFrameSize, Val: http2MaxFrameLen, }} - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. - maxStreams := config.MaxStreams - if maxStreams == 0 { - maxStreams = math.MaxUint32 - } else { + if config.MaxStreams != math.MaxUint32 { isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, - Val: maxStreams, + Val: config.MaxStreams, }) } dynamicWindow := true @@ -211,27 +232,33 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime } + done := make(chan struct{}) t := &http2Server{ - ctx: context.Background(), + ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), - authInfo: config.AuthInfo, + authInfo: authInfo, framer: framer, readerDone: make(chan struct{}), writerDone: make(chan struct{}), - maxStreams: maxStreams, + maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*Stream), - stats: config.StatsHandler, + stats: config.StatsHandlers, kp: kp, idle: time.Now(), kep: kep, @@ -239,6 +266,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, czData: new(channelzData), bufferPool: newBufferPool(), } + t.logger = prefixLoggerForServerTransport(t) + // Add peer information to the http2server context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) + t.controlBuf = newControlBuffer(t.done) if dynamicWindow { t.bdpEst = &bdpEstimator{ @@ -246,25 +277,25 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - if t.stats != nil { - t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.stats { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{} - t.stats.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { if err != nil { - t.Close() + t.Close(err) } }() @@ -273,10 +304,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if _, err := io.ReadFull(t.conn, preface); err != nil { // In deployments where a gRPC server runs behind a cloud load balancer // which performs regular TCP level health checks, the connection is - // closed immediately by the latter. Skipping the error here will help - // reduce log clutter. + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. if err == io.EOF { - return nil, nil + return nil, io.EOF } return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) } @@ -299,23 +331,22 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } - } - t.conn.Close() - t.controlBuf.finish() + t.loopy.run() close(t.writerDone) }() go t.keepalive() return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() + streamID := frame.Header().StreamID // frame.Truncated is set to true when framer detects that the current header @@ -327,9 +358,15 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeFrameSize, onWrite: func() {}, }) - return false + return nil } + if streamID%2 != 1 || streamID <= t.maxStreamID { + // illegal gRPC stream id. + return fmt.Errorf("received an illegal stream id: %v. 
headers frame: %+v", streamID, frame) + } + t.maxStreamID = streamID + buf := newRecvBuffer() s := &Stream{ id: streamID, @@ -337,15 +374,15 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( buf: buf, fc: &inFlow{limit: uint32(t.initialWindowSize)}, } - var ( - // If a gRPC Response-Headers has already been received, then it means - // that the peer is speaking gRPC and we are in gRPC mode. - isGRPC = false - mdata = make(map[string][]string) - httpMethod string - // headerError is set if an error is encountered while parsing the headers - headerError bool + // if false, content-type was missing or invalid + isGRPC = false + contentType = "" + mdata = make(metadata.MD, len(frame.Fields)) + httpMethod string + // these are set if an error is encountered while parsing the headers + protocolError bool + headerError *status.Status timeoutSet bool timeout time.Duration @@ -356,11 +393,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( case "content-type": contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) if !validContentType { + contentType = hf.Value break } mdata[hf.Name] = append(mdata[hf.Name], hf.Value) s.contentSubtype = contentSubtype isGRPC = true + + case "grpc-accept-encoding": + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + if hf.Value == "" { + continue + } + compressors := hf.Value + if s.clientAdvertisedCompressors != "" { + compressors = s.clientAdvertisedCompressors + "," + compressors + } + s.clientAdvertisedCompressors = compressors case "grpc-encoding": s.recvCompress = hf.Value case ":method": @@ -371,30 +420,90 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( timeoutSet = true var err error if timeout, err = decodeTimeout(hf.Value); err != nil { - headerError = true + headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) } + // "Transports must consider requests containing the Connection header + // as malformed." - A41 + case "connection": + if t.logger.V(logLevel) { + t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") + } + protocolError = true default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break } v, err := decodeMetadataHeader(hf.Name, hf.Value) if err != nil { - headerError = true - logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) + t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) break } mdata[hf.Name] = append(mdata[hf.Name], v) } } - if !isGRPC || headerError { + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 + // error, this takes precedence over a client not speaking gRPC. 
+ if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + return nil + } + + if protocolError { t.controlBuf.put(&cleanupStream{ streamID: streamID, rst: true, rstCode: http2.ErrCodeProtocol, onWrite: func() {}, }) - return false + return nil + } + if !isGRPC { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusUnsupportedMediaType, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), + rst: !frame.StreamEnded(), + }) + return nil + } + if headerError != nil { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: headerError, + rst: !frame.StreamEnded(), + }) + return nil + } + + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present, no eventual :authority header is a valid + // RPC. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") } if frame.StreamEnded() { @@ -406,14 +515,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } - pr := &peer.Peer{ - Addr: t.remoteAddr, - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) @@ -428,7 +530,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if t.state != reachable { t.mu.Unlock() s.cancel() - return false + return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -439,49 +541,43 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) s.cancel() - return false - } - if streamID%2 != 1 || streamID <= t.maxStreamID { - t.mu.Unlock() - // illegal gRPC stream id. 
- if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - s.cancel() - return true + return nil } - t.maxStreamID = streamID if httpMethod != http.MethodPost { t.mu.Unlock() - if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) } - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeProtocol, - onWrite: func() {}, + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) s.cancel() - return false + return nil } if t.inTapHandle != nil { var err error if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { t.mu.Unlock() - if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) } stat, ok := status.FromError(err) if !ok { stat = status.New(codes.PermissionDenied, err.Error()) } t.controlBuf.put(&earlyAbortStream{ + httpStatus: 200, streamID: s.id, contentSubtype: s.contentSubtype, status: stat, + rst: !frame.StreamEnded(), }) - return false + return nil } } t.activeStreams[streamID] = s @@ -497,17 +593,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( t.adjustWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) - if t.stats != nil { - s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), - Header: metadata.MD(mdata).Copy(), + Header: mdata.Copy(), } - t.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) @@ -528,7 +624,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return nil } // HandleStreams receives incoming streams using the given handler. This is @@ -542,8 +638,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + if t.logger.V(logLevel) { + t.logger.Warningf("Encountered http2.StreamError: %v", se) } t.mu.Lock() s := t.activeStreams[se.StreamID] @@ -561,19 +657,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. 
continue } if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() + t.Close(err) return } - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - } - t.Close() + t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() + if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + t.Close(err) break } case *http2.DataFrame: @@ -589,8 +682,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. default: - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + if t.logger.V(logLevel) { + t.logger.Infof("Received unsupported frame type %T", frame) } } } @@ -717,7 +810,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { s.write(recvMsg{buffer: buffer}) } } - if f.Header().Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { // Received the end of stream from the client. s.compareAndSwapState(streamActive, streamReadDone) s.write(recvMsg{err: io.EOF}) @@ -774,8 +867,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() return } // Maybe it's a BDP ping. @@ -817,10 +910,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - if logger.V(logLevel) { - logger.Errorf("transport: Got too many pings from the client, closing the connection.") - } - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -852,8 +942,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - if logger.V(logLevel) { - logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if t.logger.V(logLevel) { + t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) } return false } @@ -861,12 +951,27 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. 
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { + return t.streamContextErr(s) + } + + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } - s.hdrMu.Lock() + if md.Len() > 0 { if s.header.Len() > 0 { s.header = metadata.Join(s.header, md) @@ -875,10 +980,8 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() - return err + return status.Convert(err).Err() } - s.hdrMu.Unlock() return nil } @@ -909,14 +1012,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - if t.stats != nil { + for _, sh := range t.stats { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. outHeader := &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, } - t.stats.HandleRPC(s.Context(), outHeader) + sh.HandleRPC(s.Context(), outHeader) } return nil } @@ -926,17 +1029,19 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { return nil } - s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. if !s.updateHeaderSent() { // No headers have been sent. if len(s.header) > 0 { // Send a separate header frame. if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() return err } } else { // Send a trailer only response. @@ -951,7 +1056,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. - logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } @@ -965,7 +1070,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { endStream: true, onWrite: t.setResetPingStrikes, } - s.hdrMu.Unlock() + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) if !success { if err != nil { @@ -977,10 +1082,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - if t.stats != nil { + for _, sh := range t.stats { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. 
- t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -992,23 +1097,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? - s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -1018,12 +1112,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -1072,20 +1161,20 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.Drain() + t.Drain("max_idle") return } idleTimer.Reset(val) case <-ageTimer.C: - t.Drain() + t.Drain("max_age") ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: // Close the connection after grace period. - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to maximum connection age.") + if t.logger.V(logLevel) { + t.logger.Infof("Closing server transport due to maximum connection age") } - t.Close() + t.controlBuf.put(closeConnection{}) case <-t.done: } return @@ -1101,10 +1190,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to idleness.") - } - t.Close() + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) return } if !outstandingPing { @@ -1131,40 +1217,37 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } t.state = closing streams := t.activeStreams t.activeStreams = nil t.mu.Unlock() t.controlBuf.finish() close(t.done) - if err := t.conn.Close(); err != nil && logger.V(logLevel) { - logger.Infof("transport: error closing conn during Close: %v", err) - } - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) + if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { + t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. 
for _, s := range streams { s.cancel() } - if t.stats != nil { + for _, sh := range t.stats { connEnd := &stats.ConnEnd{} - t.stats.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1186,6 +1269,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1205,6 +1293,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) @@ -1220,14 +1313,14 @@ func (t *http2Server) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Server) Drain() { +func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) - t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) + t.drainEvent = grpcsync.NewEvent() + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1235,39 +1328,41 @@ var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} // Handles outgoing GoAway and returns true if loopy needs to put itself // in draining mode. func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.maxStreamMu.Lock() t.mu.Lock() if t.state == closing { // TODO(mmukhi): This seems unnecessary. t.mu.Unlock() + t.maxStreamMu.Unlock() // The transport is closing. return false, ErrConnClosing } - sid := t.maxStreamID if !g.headsUp { // Stop accepting more streams now. t.state = draining + sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() + t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { - // Abruptly close the connection following the GoAway (via - // loopywriter). But flush out what's inside the buffer first. 
- t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + if retErr != nil { + return false, retErr } return true, nil } t.mu.Unlock() + t.maxStreamMu.Unlock() // For a graceful close, send out a GoAway with stream ID of MaxUInt32, // Follow that with a ping and wait for the ack to come back or a timer // to expire. During this time accept new streams since they might have // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. - if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { @@ -1277,7 +1372,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { timer := time.NewTimer(time.Minute) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return @@ -1336,6 +1431,13 @@ func (t *http2Server) getOutFlowWindow() int64 { } } +func (t *http2Server) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + } +} + func getJitter(v time.Duration) time.Duration { if v == infinity { return 0 @@ -1345,3 +1447,18 @@ func getJitter(v time.Duration) time.Duration { j := grpcrand.Int63n(2*r) - r return time.Duration(j) } + +type connectionKey struct{} + +// GetConnection gets the connection from the context. +func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. +func setConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index d8247bcdf..19cbb18f5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -20,8 +20,8 @@ package transport import ( "bufio" - "bytes" "encoding/base64" + "errors" "fmt" "io" "math" @@ -38,21 +38,14 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) const ( // http2MaxFrameLen specifies the max length of a HTTP2 frame. http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues + // https://httpwg.org/specs/rfc7540.html#SettingValues http2InitHeaderTableSize = 4096 - // baseContentType is the base content-type for gRPC. This is a valid - // content-type on it's own, but can also include a content-subtype such as - // "proto" as a suffix after "+" or ";". See - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests - // for more details. - ) var ( @@ -92,7 +85,6 @@ var ( // 504 Gateway timeout - UNAVAILABLE. 
http.StatusGatewayTimeout: codes.Unavailable, } - logger = grpclog.Component("transport") ) // isReservedHeader checks whether hdr belongs to HTTP2 headers @@ -257,13 +249,13 @@ func encodeGrpcMessage(msg string) string { } func encodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder for len(msg) > 0 { r, size := utf8.DecodeRuneInString(msg) for _, b := range []byte(string(r)) { if size > 1 { // If size > 1, r is not ascii. Always do percent encoding. - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) continue } @@ -272,14 +264,14 @@ func encodeGrpcMessageUnchecked(msg string) string { // // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". if b >= spaceByte && b <= tildeByte && b != percentByte { - buf.WriteByte(b) + sb.WriteByte(b) } else { - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) } } msg = msg[size:] } - return buf.String() + return sb.String() } // decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. @@ -297,23 +289,23 @@ func decodeGrpcMessage(msg string) string { } func decodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder lenMsg := len(msg) for i := 0; i < lenMsg; i++ { c := msg[i] if c == percentByte && i+2 < lenMsg { parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) if err != nil { - buf.WriteByte(c) + sb.WriteByte(c) } else { - buf.WriteByte(byte(parsed)) + sb.WriteByte(byte(parsed)) i += 2 } } else { - buf.WriteByte(c) + sb.WriteByte(c) } } - return buf.String() + return sb.String() } type bufWriter struct { @@ -322,8 +314,6 @@ type bufWriter struct { batchSize int conn net.Conn err error - - onFlush func() } func newBufWriter(conn net.Conn, batchSize int) *bufWriter { @@ -339,7 +329,8 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - return w.conn.Write(b) + n, err = w.conn.Write(b) + return n, toIOError(err) } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) @@ -360,14 +351,31 @@ func (w *bufWriter) Flush() error { if w.offset == 0 { return nil } - if w.onFlush != nil { - w.onFlush() - } _, w.err = w.conn.Write(w.buf[:w.offset]) + w.err = toIOError(w.err) w.offset = 0 return w.err } +type ioError struct { + error +} + +func (i ioError) Unwrap() error { + return i.error +} + +func isIOError(err error) bool { + return errors.As(err, &ioError{}) +} + +func toIOError(err error) error { + if err == nil { + return nil + } + return ioError{error: err} +} + type framer struct { writer *bufWriter fr *http2.Framer diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go new file mode 100644 index 000000000..42ed2b07a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("transport") + +func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) +} + +func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) +} + +func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go index 7bb53cff1..c11b52782 100644 --- a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -31,7 +31,7 @@ const key = keyType("grpc.internal.transport.networktype") // Set returns a copy of the provided address with attributes containing networkType. func Set(address resolver.Address, networkType string) resolver.Address { - address.Attributes = address.Attributes.WithValues(key, networkType) + address.Attributes = address.Attributes.WithValue(key, networkType) return address } diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index a662bf39a..415961987 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -37,7 +37,7 @@ var ( httpProxyFromEnvironment = http.ProxyFromEnvironment ) -func mapAddress(ctx context.Context, address string) (*url.URL, error) { +func mapAddress(address string) (*url.URL, error) { req := &http.Request{ URL: &url.URL{ Scheme: "https", @@ -114,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // connection. func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { newAddr := addr - proxyURL, err := mapAddress(ctx, addr) + proxyURL, err := mapAddress(addr) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 141981264..aa1c89659 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -30,9 +30,11 @@ import ( "net" "sync" "sync/atomic" + "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -41,6 +43,10 @@ import ( "google.golang.org/grpc/tap" ) +// ErrNoHeaders is used as a signal that a trailers only response was received, +// and is not a real error. +var ErrNoHeaders = errors.New("stream has no headers") + const logLevel = 2 type bufferPool struct { @@ -251,6 +257,9 @@ type Stream struct { fc *inFlow wq *writeQuota + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. This is empty for the client side stream. 
+ clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) @@ -339,8 +348,24 @@ func (s *Stream) RecvCompress() string { } // SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(str string) { - s.sendCompress = str +func (s *Stream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SendCompress returns the send compressor name. +func (s *Stream) SendCompress() string { + return s.sendCompress +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *Stream) ClientAdvertisedCompressors() string { + return s.clientAdvertisedCompressors } // Done returns a channel which is closed when it receives the final status @@ -364,9 +389,15 @@ func (s *Stream) Header() (metadata.MD, error) { return s.header.Copy(), nil } s.waitOnHeader() + if !s.headerValid { return nil, s.status.Err() } + + if s.noHeaders { + return nil, ErrNoHeaders + } + return s.header.Copy(), nil } @@ -518,16 +549,17 @@ const ( // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { MaxStreams uint32 - AuthInfo credentials.AuthInfo + ConnectionTimeout time.Duration + Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandler stats.Handler + StatsHandlers []stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 } @@ -550,8 +582,8 @@ type ConnectOptions struct { CredsBundle credentials.Bundle // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters - // StatsHandler stores the handler for stats. - StatsHandler stats.Handler + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. @@ -561,7 +593,7 @@ type ConnectOptions struct { // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. @@ -570,8 +602,8 @@ type ConnectOptions struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. 
-func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) } // Options provides additional hints and information for message @@ -688,13 +720,13 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() + Close(err error) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain() + Drain(debugData string) // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() @@ -739,6 +771,12 @@ func (e ConnectionError) Origin() error { return e.err } +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go index 3677c3f04..e8b492774 100644 --- a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go +++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go @@ -28,7 +28,7 @@ type handshakeClusterNameKey struct{} // SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field // is updated with the cluster name. func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(handshakeClusterNameKey{}, clusterName) + addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) return addr } diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 3604c7819..a2cdcaf12 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -41,16 +41,17 @@ type MD map[string][]string // New creates an MD from a given key-value map. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may // result in errors if set in metadata. func New(m map[string]string) MD { - md := MD{} + md := make(MD, len(m)) for k, val := range m { key := strings.ToLower(k) md[key] = append(md[key], val) @@ -62,10 +63,11 @@ func New(m map[string]string) MD { // Pairs panics if len(kv) is odd. 
// // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -74,7 +76,7 @@ func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } - md := MD{} + md := make(MD, len(kv)/2) for i := 0; i < len(kv); i += 2 { key := strings.ToLower(kv[i]) md[key] = append(md[key], kv[i+1]) @@ -89,7 +91,11 @@ func (md MD) Len() int { // Copy returns a copy of md. func (md MD) Copy() MD { - return Join(md) + out := make(MD, len(md)) + for k, v := range md { + out[k] = copyOf(v) + } + return out } // Get obtains the values for a given key. @@ -169,8 +175,11 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) added := make([][]string, len(md.added)+1) copy(added, md.added) - added[len(added)-1] = make([]string, len(kv)) - copy(added[len(added)-1], kv) + kvCopy := make([]string, 0, len(kv)) + for i := 0; i < len(kv); i += 2 { + kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) + } + added[len(added)-1] = kvCopy return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } @@ -182,17 +191,51 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { if !ok { return nil, false } - out := MD{} + out := make(MD, len(md)) for k, v := range md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + out[key] = copyOf(v) } return out, true } +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Key must be lower-case. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + if strings.ToLower(k) == key { + return copyOf(v) + } + } + return nil +} + +// the returned slice must not be modified in place +func copyOf(v []string) []string { + vals := make([]string, len(v)) + copy(vals, v) + return vals +} + // FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // // Remember to perform strings.ToLower on the keys, for both the returned MD (MD @@ -220,13 +263,18 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { return nil, false } - out := MD{} + mdSize := len(raw.md) + for i := range raw.added { + mdSize += len(raw.added[i]) / 2 + } + + out := make(MD, mdSize) for k, v := range raw.md { // We need to manually convert all keys to lower case, because MD is a // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. 
key := strings.ToLower(k) - out[key] = v + out[key] = copyOf(v) } for _, added := range raw.added { if len(added)%2 == 1 { diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 0878ada9d..02f975951 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/channelz" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) @@ -35,6 +36,7 @@ import ( type pickerWrapper struct { mu sync.Mutex done bool + idle bool blockingCh chan struct{} picker balancer.Picker } @@ -46,7 +48,11 @@ func newPickerWrapper() *pickerWrapper { // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done { + if pw.done || pw.idle { + // There is a small window where a picker update from the LB policy can + // race with the channel going to idle mode. If the picker is idle here, + // it is because the channel asked it to do so, and therefore it is sage + // to ignore the update from the LB policy. pw.mu.Unlock() return } @@ -57,12 +63,16 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Unlock() } -func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { - acw.mu.Lock() - ac := acw.ac - acw.mu.Unlock() +// doneChannelzWrapper performs the following: +// - increments the calls started channelz counter +// - wraps the done function in the passed in result to increment the calls +// failed or calls succeeded channelz counter before invoking the actual +// done function. +func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { + ac := acbw.ac ac.incrCallsStarted() - return func(b balancer.DoneInfo) { + done := result.Done + result.Done = func(b balancer.DoneInfo) { if b.Err != nil && b.Err != io.EOF { ac.incrCallsFailed() } else { @@ -81,7 +91,7 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { var ch chan struct{} var lastPickErr error @@ -89,7 +99,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. pw.mu.Lock() if pw.done { pw.mu.Unlock() - return nil, nil, ErrClientConnClosing + return nil, balancer.PickResult{}, ErrClientConnClosing } if pw.picker == nil { @@ -110,9 +120,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
} switch ctx.Err() { case context.DeadlineExceeded: - return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, nil, status.Error(codes.Canceled, errStr) + return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -124,14 +134,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. pw.mu.Unlock() pickResult, err := p.Pick(info) - if err != nil { if err == balancer.ErrNoSubConnAvailable { continue } - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, err + // First restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) + } + return nil, balancer.PickResult{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -139,19 +152,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, nil, status.Error(codes.Unavailable, err.Error()) + return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } - acw, ok := pickResult.SubConn.(*acBalancerWrapper) + acbw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { - logger.Error("subconn returned from pick is not *acBalancerWrapper") + logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } - if t := acw.getAddrConn().getReadyTransport(); t != nil { + if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { - return t, doneChannelzWrapper(acw, pickResult.Done), nil + doneChannelzWrapper(acbw, &pickResult) + return t, pickResult, nil } - return t, pickResult.Done, nil + return t, pickResult, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. @@ -175,3 +189,28 @@ func (pw *pickerWrapper) close() { pw.done = true close(pw.blockingCh) } + +func (pw *pickerWrapper) enterIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.idle = true +} + +func (pw *pickerWrapper) exitIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.blockingCh = make(chan struct{}) + pw.idle = false +} + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index b858c2a5e..abe266b02 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -19,11 +19,15 @@ package grpc import ( + "encoding/json" "errors" "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/serviceconfig" ) // PickFirstBalancerName is the name of the pick_first balancer. 
@@ -43,94 +47,181 @@ func (*pickfirstBuilder) Name() string { return PickFirstBalancerName } +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of addresses received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &pfConfig{} + if err := json.Unmarshal(js, cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn + cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) - } if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) + logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) + } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + addrs := state.ResolverState.Addresses + if len(addrs) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. 
+ b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + if state.BalancerConfig != nil { + cfg, ok := state.BalancerConfig.(*pfConfig) + if !ok { + return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) - b.sc.Connect() + b.cfg = cfg + } + + if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { + grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, addrs) + return nil } + + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState + } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } - if b.sc != sc { + if b.subConn != subConn { if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { - case connectivity.Ready, connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + switch state.ConnectivityState { + case connectivity.Ready: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. 
See A62. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. + b.subConn.Connect() + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: s.ConnectivityState, - Picker: &picker{err: s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } + b.state = state.ConnectivityState } func (b *pickfirstBalancer) Close() { } +func (b *pickfirstBalancer) ExitIdle() { + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() + } +} + type picker struct { result balancer.PickResult err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + subConn balancer.SubConn +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + func init() { balancer.Register(newPickfirstBuilder()) } diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 0a1e975ad..cd4554785 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -25,7 +25,7 @@ import ( // PreparedMsg is responsible for creating a Marshalled and Compressed object. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index dfd3226a1..a6f26c8ab 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH} mkdir -p ${GOBIN} echo "remove existing generated files" -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . 
-name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) @@ -57,7 +57,8 @@ LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto profiling/proto/service.proto - reflection/grpc_reflection_v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto ) # Generates only the new gRPC Service symbols @@ -68,7 +69,6 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto ${WORKDIR}/grpc-proto/grpc/testing/*.proto ${WORKDIR}/grpc-proto/grpc/core/*.proto ) @@ -76,7 +76,20 @@ SOURCES=( # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an # import path of 'bar' in the generated code when 'foo.proto' is imported in # one of the sources. -OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). +OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing for src in ${SOURCES[@]}; do echo "protoc ${src}" @@ -85,7 +98,6 @@ for src in ${SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done @@ -96,24 +108,16 @@ for src in ${LEGACY_SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done # The go_package option in grpc/lookup/v1/rls.proto doesn't match the # current location. Move it into the right place. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 - -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go - -# grpc/service_config/service_config.proto does not have a go_package option. 
-mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -# grpc/testing does not have a go_package option. -mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ -mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go new file mode 100644 index 000000000..efcb7f3ef --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -0,0 +1,138 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +type addressMapEntry struct { + addr Address + value interface{} +} + +// AddressMap is a map of addresses to arbitrary values taking into account +// Attributes. BalancerAttributes are ignored, as are Metadata and Type. +// Multiple accesses may not be performed concurrently. Must be created via +// NewAddressMap; do not construct directly. +type AddressMap struct { + // The underlying map is keyed by an Address with fields that we don't care + // about being set to their zero values. The only fields that we care about + // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to + // distinguish between addresses with same `Addr` and `ServerName`, but + // different `Attributes`, we cannot store the `Attributes` in the map key. + // + // The comparison operation for structs work as follows: + // Struct values are comparable if all their fields are comparable. Two + // struct values are equal if their corresponding non-blank fields are equal. + // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. + m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. +func NewAddressMap() *AddressMap { + return &AddressMap{m: make(map[Address]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { + for i, entry := range l { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. 
+ if entry.addr.Attributes.Equal(addr.Attributes) { + return i + } + } + return -1 +} + +// Get returns the value for the address in the map, if present. +func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + return entryList[entry].value, true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (a *AddressMap) Set(addr Address, value interface{}) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + entryList[entry].value = value + return + } + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. +func (a *AddressMap) Delete(addr Address) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + entry := entryList.find(addr) + if entry == -1 { + return + } + if len(entryList) == 1 { + entryList = nil + } else { + copy(entryList[entry:], entryList[entry+1:]) + entryList = entryList[:len(entryList)-1] + } + a.m[addrKey] = entryList +} + +// Len returns the number of entries in the map. +func (a *AddressMap) Len() int { + ret := 0 + for _, entryList := range a.m { + ret += len(entryList) + } + return ret +} + +// Keys returns a slice of all current map keys. +func (a *AddressMap) Keys() []Address { + ret := make([]Address, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.addr) + } + } + return ret +} + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []interface{} { + ret := make([]interface{}, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 6a9d234a5..353c10b69 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,7 +22,10 @@ package resolver import ( "context" + "fmt" "net" + "net/url" + "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" @@ -38,8 +41,9 @@ var ( // TODO(bar) install dns resolver in init(){}. -// Register registers the resolver builder to the resolver map. b.Scheme will be -// used as the scheme registered with this builder. +// Register registers the resolver builder to the resolver map. b.Scheme will +// be used as the scheme registered with this builder. The registry is case +// sensitive, and schemes should not contain any uppercase characters. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. If multiple Resolvers are @@ -94,7 +98,7 @@ const ( // Address represents a server the client connects to. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -116,9 +120,14 @@ type Address struct { ServerName string // Attributes contains arbitrary data about this address intended for - // consumption by the load balancing policy. + // consumption by the SubConn. Attributes *attributes.Attributes + // BalancerAttributes contains arbitrary data about this address intended + // for consumption by the LB policy. These attributes do not affect SubConn + // creation, connection establishment, handshaking, etc. 
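The new resolver/map.go above adds AddressMap, keyed on an address's Addr, ServerName and Attributes while ignoring BalancerAttributes. A minimal usage sketch, with addresses and the attribute key invented purely for illustration:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	m := resolver.NewAddressMap()

	// Same Addr, different Attributes: these are two distinct keys.
	a1 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "a")}
	a2 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "b")}

	m.Set(a1, "subchannel-1")
	m.Set(a2, "subchannel-2")

	if v, ok := m.Get(a1); ok {
		fmt.Println(v) // subchannel-1
	}
	fmt.Println(m.Len()) // 2

	m.Delete(a1)
	fmt.Println(m.Len()) // 1
}
```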
+ BalancerAttributes *attributes.Attributes + // Type is the type of this address. // // Deprecated: use Attributes instead. @@ -131,6 +140,30 @@ type Address struct { Metadata interface{} } +// Equal returns whether a and o are identical. Metadata is compared directly, +// not with any recursive introspection. +func (a Address) Equal(o Address) bool { + return a.Addr == o.Addr && a.ServerName == o.ServerName && + a.Attributes.Equal(o.Attributes) && + a.BalancerAttributes.Equal(o.BalancerAttributes) && + a.Type == o.Type && a.Metadata == o.Metadata +} + +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() +} + // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { @@ -181,6 +214,15 @@ type State struct { // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. + // + // If an error is returned, the resolver should try to resolve the + // target again. The resolver should use a backoff timer to prevent + // overloading the server with requests. If a resolver is certain that + // reresolving will not change the result, e.g. because it is + // a watch-based resolver, returned errors can be ignored. + // + // If the resolved State is the same as the last reported one, calling + // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an // error. The ClientConn will notify the load balancer and begin calling @@ -204,25 +246,51 @@ type ClientConn interface { // Target represents a target for gRPC, as specified in: // https://github.com/grpc/grpc/blob/master/doc/naming.md. -// It is parsed from the target string that gets passed into Dial or DialContext by the user. And -// grpc passes it to the resolver and the balancer. +// It is parsed from the target string that gets passed into Dial or DialContext +// by the user. And gRPC passes it to the resolver and the balancer. // -// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will -// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed -// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// If the target follows the naming spec, and the parsed scheme is registered +// with gRPC, we will parse the target string according to the spec. If the +// target does not contain a scheme or if the parsed scheme is not registered +// (i.e. no corresponding resolver available to resolve the endpoint), we will +// apply the default scheme, and will attempt to reparse it. // -// If the target does not contain a scheme, we will apply the default scheme, and set the Target to -// be the full target string. e.g. "foo.bar" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// Examples: // -// If the parsed scheme is not registered (i.e. 
no corresponding resolver available to resolve the -// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target -// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - Scheme string + // Deprecated: use URL.Scheme instead. + Scheme string + // Deprecated: use URL.Host instead. Authority string - Endpoint string + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. Any query params specified in the original dial + // target can be accessed from here. + URL url.URL +} + +// Endpoint retrieves endpoint without leading "/" from either `URL.Path` +// or `URL.Opaque`. The latter is used when the former is empty. +func (t Target) Endpoint() string { + endpoint := t.URL.Path + if endpoint == "" { + endpoint = t.URL.Opaque + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field. + return strings.TrimPrefix(endpoint, "/") } // Builder creates a resolver that will be used to watch name resolution updates. @@ -232,8 +300,10 @@ type Builder interface { // gRPC dial calls Build synchronously, and fails if the returned error is // not nil. Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) - // Scheme returns the scheme supported by this resolver. - // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + // Scheme returns the scheme supported by this resolver. Scheme is defined + // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned + // string should not contain uppercase characters, as they will not match + // the parsed target's scheme as defined in RFC 3986. Scheme() string } diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 2c47cd54f..b408b3688 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,149 +19,204 @@ package grpc import ( - "fmt" + "context" "strings" "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) +// resolverStateUpdater wraps the single method used by ccResolverWrapper to +// report a state update from the actual resolver implementation. 
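With the Target changes above (Scheme and Authority deprecated in favour of the parsed URL, plus the new Endpoint() accessor), a resolver reads the dial target roughly as shown below. The Target here is built by hand only for illustration; in practice gRPC constructs it from the dial string:

```go
package main

import (
	"fmt"
	"net/url"

	"google.golang.org/grpc/resolver"
)

func main() {
	// Mimic what gRPC would produce for "dns://some_authority/foo.bar?key=val".
	u, err := url.Parse("dns://some_authority/foo.bar?key=val")
	if err != nil {
		panic(err)
	}
	t := resolver.Target{URL: *u}

	fmt.Println(t.URL.Scheme)             // dns
	fmt.Println(t.URL.Host)               // some_authority (previously Target.Authority)
	fmt.Println(t.Endpoint())             // foo.bar (leading "/" stripped from URL.Path)
	fmt.Println(t.URL.Query().Get("key")) // val
}
```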
+type resolverStateUpdater interface { + updateResolverState(s resolver.State, err error) error +} + // ccResolverWrapper is a wrapper on top of cc for resolvers. // It implements resolver.ClientConn interface. type ccResolverWrapper struct { - cc *ClientConn - resolverMu sync.Mutex - resolver resolver.Resolver - done *grpcsync.Event - curState resolver.State + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc resolverStateUpdater + channelzID *channelz.Identifier + ignoreServiceConfig bool + opts ccResolverWrapperOpts + serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. + serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). + + // All incoming (resolver --> gRPC) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled on the serializer. + // Fields accessed *only* in these serializer callbacks, can therefore be + // accessed without a mutex. + curState resolver.State + + // mu guards access to the below fields. + mu sync.Mutex + closed bool + resolver resolver.Resolver // Accessed only from outgoing calls. +} - incomingMu sync.Mutex // Synchronizes all the incoming calls. +// ccResolverWrapperOpts wraps the arguments to be passed when creating a new +// ccResolverWrapper. +type ccResolverWrapperOpts struct { + target resolver.Target // User specified dial target to resolve. + builder resolver.Builder // Resolver builder to use. + bOpts resolver.BuildOptions // Resolver build options to use. + channelzID *channelz.Identifier // Channelz identifier for the channel. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { +func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { + ctx, cancel := context.WithCancel(context.Background()) ccr := &ccResolverWrapper{ - cc: cc, - done: grpcsync.NewEvent(), - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - rbo := resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - } - - var err error - // We need to hold the lock here while we assign to the ccr.resolver field - // to guard against a data race caused by the following code path, - // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up - // accessing ccr.resolver which is being assigned here. - ccr.resolverMu.Lock() - defer ccr.resolverMu.Unlock() - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + cc: cc, + channelzID: opts.channelzID, + ignoreServiceConfig: opts.bOpts.DisableServiceConfig, + opts: opts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + + // Cannot hold the lock at build time because the resolver can send an + // update or error inline and these incoming calls grab the lock to schedule + // a callback in the serializer. 
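The rewritten ccResolverWrapper pushes every resolver-to-gRPC call through a callback serializer so those callbacks never run concurrently. The internal grpcsync.CallbackSerializer is not importable, but the idea can be sketched with a channel and a single worker goroutine; the names below are illustrative only:

```go
package main

import (
	"fmt"
	"sync"
)

// serializer runs scheduled callbacks strictly one at a time, so the
// callbacks themselves need no locking for the state they share.
type serializer struct {
	callbacks chan func()
}

func newSerializer() *serializer {
	s := &serializer{callbacks: make(chan func(), 64)}
	go func() {
		for f := range s.callbacks {
			f() // one callback at a time, in FIFO order
		}
	}()
	return s
}

func (s *serializer) schedule(f func()) { s.callbacks <- f }

func main() {
	s := newSerializer()
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		i := i
		wg.Add(1)
		s.schedule(func() {
			fmt.Println("callback", i)
			wg.Done()
		})
	}
	wg.Wait()
	close(s.callbacks)
}
```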
+ r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) if err != nil { + cancel() return nil, err } + + // Any error reported by the resolver at build time that leads to a + // re-resolution request from the balancer is dropped by grpc until we + // return from this function. So, we don't have to handle pending resolveNow + // requests here. + ccr.mu.Lock() + ccr.resolver = r + ccr.mu.Unlock() + return ccr, nil } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.resolverMu.Lock() - if !ccr.done.HasFired() { - ccr.resolver.ResolveNow(o) + ccr.mu.Lock() + defer ccr.mu.Unlock() + + // ccr.resolver field is set only after the call to Build() returns. But in + // the process of building, the resolver may send an error update which when + // propagated to the balancer may result in a re-resolution request. + if ccr.closed || ccr.resolver == nil { + return } - ccr.resolverMu.Unlock() + ccr.resolver.ResolveNow(o) } func (ccr *ccResolverWrapper) close() { - ccr.resolverMu.Lock() - ccr.resolver.Close() - ccr.done.Fire() - ccr.resolverMu.Unlock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + return + } + + channelz.Info(logger, ccr.channelzID, "Closing the name resolver") + + // Close the serializer to ensure that no more calls from the resolver are + // handled, before actually closing the resolver. + ccr.serializerCancel() + ccr.closed = true + r := ccr.resolver + ccr.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-ccr.serializer.Done + + // Spawn a goroutine to close the resolver (since it may block trying to + // cleanup all allocated resources) and return early. + go r.Close() } +// serializerScheduleLocked is a convenience method to schedule a function to be +// run on the serializer while holding ccr.mu. +func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { + ccr.mu.Lock() + ccr.serializer.Schedule(f) + ccr.mu.Unlock() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return nil - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { + errCh := make(chan error, 1) + ok := ccr.serializer.Schedule(func(context.Context) { ccr.addChannelzTraceEvent(s) + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + errCh <- balancer.ErrBadResolverState + return + } + errCh <- nil + }) + if !ok { + // The only time when Schedule() fail to add the callback to the + // serializer is when the serializer is closed, and this happens only + // when the resolver wrapper is closed. + return nil } - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - return balancer.ErrBadResolverState - } - return nil + return <-errCh } +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. 
func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) + }) } -// NewAddress is called by the resolver implementation to send addresses to gRPC. +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { + ccr.serializerScheduleLocked(func(_ context.Context) { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) + }) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) - if ccr.cc.dopts.disableServiceConfig { - channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - if channelz.IsOn() { + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.ignoreServiceConfig { + channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) + }) } +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { return parseServiceConfig(scJSON) } +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. 
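The ClientConn methods above (UpdateState, ReportError, NewAddress, NewServiceConfig) are what resolver implementations call into. A minimal custom resolver, sketched under the assumption of a fixed backend list; the "static" scheme and the addresses are invented for the example:

```go
package staticresolver

import "google.golang.org/grpc/resolver"

type builder struct{}

// Scheme must be lowercase; the registry is case sensitive.
func (builder) Scheme() string { return "static" }

func (builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	r := &res{cc: cc}
	r.ResolveNow(resolver.ResolveNowOptions{})
	return r, nil
}

type res struct{ cc resolver.ClientConn }

func (r *res) ResolveNow(resolver.ResolveNowOptions) {
	// If UpdateState returns an error, a real resolver should re-resolve
	// with backoff; this fixed list never changes, so the error is ignored.
	_ = r.cc.UpdateState(resolver.State{
		Addresses: []resolver.Address{
			{Addr: "10.0.0.1:50051"},
			{Addr: "10.0.0.2:50051"},
		},
	})
}

func (r *res) Close() {}

func init() { resolver.Register(builder{}) }
```

With this registered, dialing a target such as "static:///ignored" would hand the fixed address list to the channel's balancer.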
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { var updates []string var oldSC, newSC *ServiceConfig @@ -180,8 +235,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtInfo, - }) + channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 87987a2e6..2030736a3 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -25,7 +25,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "math" "strings" "sync" @@ -77,7 +76,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { return &gzipCompressor{ pool: sync.Pool{ New: func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) + w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) } @@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { z.Close() d.pool.Put(z) }() - return ioutil.ReadAll(z) + return io.ReadAll(z) } func (d *gzipDecompressor) Type() string { @@ -160,6 +159,7 @@ type callInfo struct { contentSubtype string codec baseCodec maxRetryRPCBufferSize int + onFinish []func(err error) } func defaultCallInfo() *callInfo { @@ -198,7 +198,7 @@ func Header(md *metadata.MD) CallOption { // HeaderCallOption is a CallOption for collecting response header metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -220,7 +220,7 @@ func Trailer(md *metadata.MD) CallOption { // TrailerCallOption is a CallOption for collecting response trailer metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -242,7 +242,7 @@ func Peer(p *peer.Peer) CallOption { // PeerCallOption is a CallOption for collecting the identity of the remote // peer. The peer field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -282,7 +282,7 @@ func FailFast(failFast bool) CallOption { // FailFastCallOption is a CallOption for indicating whether an RPC should fail // fast or not. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -296,8 +296,44 @@ func (o FailFastCallOption) before(c *callInfo) error { } func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +// OnFinish returns a CallOption that configures a callback to be called when +// the call completes. The error passed to the callback is the status of the +// RPC, and may be nil. The onFinish callback provided will only be called once +// by gRPC. This is mainly used to be used by streaming interceptors, to be +// notified when the RPC completes along with information about the status of +// the RPC. 
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func OnFinish(onFinish func(err error)) CallOption { + return OnFinishCallOption{ + OnFinish: onFinish, + } +} + +// OnFinishCallOption is CallOption that indicates a callback to be called when +// the call completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type OnFinishCallOption struct { + OnFinish func(error) +} + +func (o OnFinishCallOption) before(c *callInfo) error { + c.onFinish = append(c.onFinish, o.OnFinish) + return nil +} + +func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} + // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can receive. +// in bytes the client can receive. If this is not set, gRPC uses the default +// 4MB. func MaxCallRecvMsgSize(bytes int) CallOption { return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } @@ -305,7 +341,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption { // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can receive. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -320,7 +356,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can send. +// in bytes the client can send. If this is not set, gRPC uses the default +// `math.MaxInt32`. func MaxCallSendMsgSize(bytes int) CallOption { return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } @@ -328,7 +365,7 @@ func MaxCallSendMsgSize(bytes int) CallOption { // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can send. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -351,7 +388,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { // PerRPCCredsCallOption is a CallOption that indicates the per-RPC // credentials to use for the call. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -369,7 +406,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // sending the request. If WithCompressor is also set, UseCompressor has // higher priority. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -379,7 +416,7 @@ func UseCompressor(name string) CallOption { // CompressorCallOption is a CallOption that indicates the compressor to use. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -416,7 +453,7 @@ func CallContentSubtype(contentSubtype string) CallOption { // ContentSubtypeCallOption is a CallOption that indicates the content-subtype // used for marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
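The new OnFinish call option above (experimental) and the clarified receive/send size defaults can be combined on a single call. The target, method name and messages below are placeholders for whatever generated client code would normally supply:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// req and resp stand in for generated protobuf messages.
	var req, resp interface{}
	err = conn.Invoke(ctx, "/example.Service/Method", req, resp,
		grpc.MaxCallRecvMsgSize(16*1024*1024), // default is 4MB if unset
		grpc.MaxCallSendMsgSize(4*1024*1024),  // default is math.MaxInt32 if unset
		grpc.OnFinish(func(err error) {
			// Called exactly once with the final status of the RPC (nil on success).
			log.Printf("RPC finished: %v", err)
		}),
	)
	log.Println(err)
}
```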
@@ -444,7 +481,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -455,7 +492,7 @@ func ForceCodec(codec encoding.Codec) CallOption { // ForceCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -480,7 +517,7 @@ func CallCustomCodec(codec Codec) CallOption { // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -497,7 +534,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -508,7 +545,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption { // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of // memory to be used for caching this RPC for retry purposes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -548,10 +585,11 @@ type parser struct { // format. The caller owns the returned msg memory. // // If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. @@ -656,12 +694,13 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ - Client: client, - Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - SentTime: t, + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + CompressedLength: len(payload), + SentTime: t, } } @@ -682,7 +721,7 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool } type payloadInfo struct { - wireLength int // The compressed length got from wire. + compressedLength int // The compressed length got from wire. 
uncompressedBytes []byte } @@ -692,7 +731,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, err } if payInfo != nil { - payInfo.wireLength = len(d) + payInfo.compressedLength = len(d) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -710,15 +749,13 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei d, size, err = decompress(compressor, d, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + } + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } - } else { - size = len(d) - } - if size > maxReceiveMessageSize { - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize) } return d, nil } @@ -747,7 +784,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize } // Read from LimitReader with limit max+1. So if the underlying // reader is over limit, the result will be bigger than max. - d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return d, len(d), err } @@ -760,7 +797,7 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf return err } if err := c.Unmarshal(d, m); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { payInfo.uncompressedBytes = d diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 0251f48da..8869cc906 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -43,8 +43,8 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -73,6 +73,14 @@ func init() { internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } + internal.AddGlobalServerOptions = func(opt ...ServerOption) { + globalServerOptions = append(globalServerOptions, opt...) + } + internal.ClearGlobalServerOptions = func() { + globalServerOptions = nil + } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption } var statusOK = status.New(codes.OK, "") @@ -107,12 +115,6 @@ type serviceInfo struct { mdata interface{} } -type serverWorkerData struct { - st transport.ServerTransport - wg *sync.WaitGroup - stream *transport.Stream -} - // Server is a gRPC server to serve RPC requests. 
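The reorganised recvAndDecompress above keeps the documented LimitReader trick: decompress reads at most maxReceiveMessageSize+1 bytes, so exceeding the limit is detectable without inflating an arbitrarily large payload. The same idea in isolation, with names invented for the sketch:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// readCapped reads at most max+1 bytes and reports whether the source held
// more than max bytes, mirroring the size check in decompress().
func readCapped(r io.Reader, max int) (data []byte, overLimit bool, err error) {
	data, err = io.ReadAll(io.LimitReader(r, int64(max)+1))
	if err != nil {
		return nil, false, err
	}
	return data, len(data) > max, nil
}

func main() {
	payload := bytes.Repeat([]byte("x"), 10)

	_, over, _ := readCapped(bytes.NewReader(payload), 8)
	fmt.Println(over) // true: message would exceed the 8-byte limit

	_, over, _ = readCapped(bytes.NewReader(payload), 16)
	fmt.Println(over) // false
}
```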
type Server struct { opts serverOptions @@ -134,10 +136,10 @@ type Server struct { channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData - serverWorkerChannels []chan *serverWorkerData + serverWorkerChannel chan func() } type serverOptions struct { @@ -149,8 +151,9 @@ type serverOptions struct { streamInt StreamServerInterceptor chainUnaryInts []UnaryServerInterceptor chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger inTapHandle tap.ServerInHandle - statsHandler stats.Handler + statsHandlers []stats.Handler maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int @@ -168,12 +171,14 @@ type serverOptions struct { } var defaultServerOptions = serverOptions{ + maxConcurrentStreams: math.MaxUint32, maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, } +var globalServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -183,7 +188,7 @@ type ServerOption interface { // EmptyServerOption does not alter the server configuration. It can be embedded // in another structure to build custom server options. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -207,10 +212,27 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption { } } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -218,11 +240,10 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. -// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. 
Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -298,7 +319,7 @@ func CustomCodec(codec Codec) ServerOption { // https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. // Will be supported throughout 1.x. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -361,6 +382,9 @@ func MaxSendMsgSize(m int) ServerOption { // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { + if n == 0 { + n = math.MaxUint32 + } return newFuncServerOption(func(o *serverOptions) { o.maxConcurrentStreams = n }) @@ -419,7 +443,7 @@ func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOptio // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -435,7 +459,21 @@ func InTapHandle(h tap.ServerInHandle) ServerOption { // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.statsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.statsHandlers = append(o.statsHandlers, h) + }) +} + +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. +func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl }) } @@ -462,7 +500,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { // new connections. If this is not set, the default is 120 seconds. A zero or // negative value will result in an immediate timeout. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -483,7 +521,7 @@ func MaxHeaderListSize(s uint32) ServerOption { // HeaderTableSize returns a ServerOption that sets the size of dynamic // header table for stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -498,7 +536,7 @@ func HeaderTableSize(s uint32) ServerOption { // zero (default) will disable workers and spawn a new goroutine for each // stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -520,46 +558,42 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { const serverWorkerResetThreshold = 1 << 16 // serverWorkers blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows different requests to be +// data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). 
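Several of the server options touched above can be seen together below: stats handlers are now kept as a slice (so StatsHandler may be passed more than once, and a nil handler is rejected with an error log), MaxConcurrentStreams treats 0 as "no limit" (MaxUint32), and NumStreamWorkers remains experimental. The handler type and listen address are placeholders:

```go
package main

import (
	"context"
	"log"
	"net"
	"runtime"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// logHandler is a trivial stats.Handler used only to illustrate that the
// server now accepts multiple handlers.
type logHandler struct{}

func (logHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (logHandler) HandleRPC(_ context.Context, s stats.RPCStats)                     { log.Printf("rpc stats: %T", s) }
func (logHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (logHandler) HandleConn(_ context.Context, _ stats.ConnStats)                   {}

func main() {
	srv := grpc.NewServer(
		grpc.MaxConcurrentStreams(512),                  // 0 would now mean "no limit" (MaxUint32)
		grpc.NumStreamWorkers(uint32(runtime.NumCPU())), // experimental worker pool
		grpc.StatsHandler(logHandler{}),
		grpc.StatsHandler(logHandler{}), // a second handler is appended, not overwritten
	)

	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.Serve(lis))
}
```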
// // [1] https://github.com/golang/go/issues/18138 -func (s *Server) serverWorker(ch chan *serverWorkerData) { - // To make sure all server workers don't reset at the same time, choose a - // random number of iterations before resetting. - threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) - for completed := 0; completed < threshold; completed++ { - data, ok := <-ch +func (s *Server) serverWorker() { + for completed := 0; completed < serverWorkerResetThreshold; completed++ { + f, ok := <-s.serverWorkerChannel if !ok { return } - s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) - data.wg.Done() + f() } - go s.serverWorker(ch) + go s.serverWorker() } -// initServerWorkers creates worker goroutines and channels to process incoming +// initServerWorkers creates worker goroutines and a channel to process incoming // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { - s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + s.serverWorkerChannel = make(chan func()) for i := uint32(0); i < s.opts.numServerWorkers; i++ { - s.serverWorkerChannels[i] = make(chan *serverWorkerData) - go s.serverWorker(s.serverWorkerChannels[i]) + go s.serverWorker() } } func (s *Server) stopServerWorkers() { - for i := uint32(0); i < s.opts.numServerWorkers; i++ { - close(s.serverWorkerChannels[i]) - } + close(s.serverWorkerChannel) } // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions + for _, o := range globalServerOptions { + o.apply(&opts) + } for _, o := range opt { o.apply(&opts) } @@ -584,9 +618,8 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") return s } @@ -710,16 +743,9 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo { // the server being stopped. 
var ErrServerStopped = errors.New("grpc: the server has been stopped") -func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if s.opts.creds == nil { - return rawConn, nil, nil - } - return s.opts.creds.ServerHandshake(rawConn) -} - type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -731,9 +757,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -766,11 +791,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -780,8 +800,16 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -839,35 +867,14 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) - conn, authInfo, err := s.useTransportAuthenticator(rawConn) - if err != nil { - // ErrConnDispatched means that the connection was dispatched away from - // gRPC; those connections should be left open. - if err != credentials.ErrConnDispatched { - // In deployments where a gRPC server runs behind a cloud load - // balancer which performs regular TCP level health checks, the - // connection is closed immediately by the latter. Skipping the - // error here will help reduce log clutter. - if err != io.EOF { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - } - rawConn.Close() - } - rawConn.SetDeadline(time.Time{}) - return - } // Finish handshaking (HTTP2) - st := s.newHTTP2Transport(conn, authInfo) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) if st == nil { - conn.Close() return } - rawConn.SetDeadline(time.Time{}) if !s.addConn(lisAddr, st) { return } @@ -881,19 +888,20 @@ func (s *Server) drainServerTransports(addr string) { s.mu.Lock() conns := s.conns[addr] for st := range conns { - st.Drain() + st.Drain("") } s.mu.Unlock() } // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). 
-func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { +func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { config := &transport.ServerConfig{ MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, + StatsHandlers: s.opts.statsHandlers, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -909,8 +917,15 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr s.mu.Lock() s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() - c.Close() - channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. + if err != io.EOF { + channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + c.Close() + } return nil } @@ -918,29 +933,29 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr } func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() + defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup - var roundRobinCounter uint32 + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) + + streamQuota.acquire() + f := func() { + defer streamQuota.release() + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + } + if s.opts.numServerWorkers > 0 { - data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + case s.serverWorkerChannel <- f: + return default: // If all stream workers are busy, fallback to the default code path. - go func() { - s.handleStream(st, stream, s.traceInfo(st, stream)) - wg.Done() - }() } - } else { - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() } + go f() }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx @@ -965,26 +980,27 @@ var _ http.Handler = (*Server)(nil) // To share one port (such as 443 for https) between gRPC and an // existing http.Handler, use a root http.Handler such as: // -// if r.ProtoMajor == 2 && strings.HasPrefix( -// r.Header.Get("Content-Type"), "application/grpc") { -// grpcServer.ServeHTTP(w, r) -// } else { -// yourMux.ServeHTTP(w, r) -// } +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } // // Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally // separate from grpc-go's HTTP/2 server. Performance and features may vary // between the two paths. ServeHTTP does not support some gRPC features // available through grpc-go's HTTP/2 server. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. return } if !s.addConn(listenerAddressForServeHTTP, st) { @@ -1022,13 +1038,13 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { // Transport added after we drained our existing conns: drain it // immediately. - st.Drain() + st.Drain("") } if s.conns[addr] == nil { @@ -1095,8 +1111,10 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) - if err == nil && s.opts.statsHandler != nil { - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + if err == nil { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } } return err } @@ -1124,27 +1142,27 @@ func chainUnaryServerInterceptors(s *Server) { func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - var i int - var next UnaryHandler - next = func(ctx context.Context, req interface{}) (interface{}, error) { - if i == len(interceptors)-1 { - return interceptors[i](ctx, req, info, handler) - } - i++ - return interceptors[i-1](ctx, req, info, next) - } - return next(ctx, req) + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - sh := s.opts.statsHandler - if sh != nil || trInfo != nil || channelz.IsOn() { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - if sh != nil { + for _, sh := range shs { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, @@ -1175,7 +1193,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.Finish() } - if sh != nil { + for _, sh := range shs { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1195,9 +1213,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
} }() } - - binlog := binarylog.GetMethodLogger(stream.Method()) - if binlog != nil { + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ @@ -1217,7 +1242,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if peer, ok := peer.FromContext(ctx); ok { logEntry.PeerAddr = peer.Addr } - binlog.Log(logEntry) + for _, binlog := range binlogs { + binlog.Log(ctx, logEntry) + } } // comp and cp are used for compression. decomp and dc are used for @@ -1227,6 +1254,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. var comp, decomp encoding.Compressor var cp Compressor var dc Decompressor + var sendCompressorName string // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. @@ -1247,23 +1275,29 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { cp = s.opts.cp - stream.SetSendCompress(cp.Type()) + sendCompressorName = cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. comp = encoding.GetCompressor(rc) if comp != nil { - stream.SetSendCompress(rc) + sendCompressorName = comp.Name() + } + } + + if sendCompressorName != "" { + if err := stream.SetSendCompress(sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } var payInfo *payloadInfo - if sh != nil || binlog != nil { + if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } @@ -1274,19 +1308,23 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - if sh != nil { + for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - WireLength: payInfo.wireLength + headerLen, - Data: d, - Length: len(d), + RecvTime: time.Now(), + Payload: v, + Length: len(d), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Data: d, }) } - if binlog != nil { - binlog.Log(&binarylog.ClientMessage{ + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: d, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), cm) + } } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) @@ -1298,9 +1336,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) @@ -1309,18 +1348,24 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if e := t.WriteStatus(stream, appStatus); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } - if binlog != nil { + if len(binlogs) != 0 { if h, _ := stream.Header(); h.Len() > 0 { // Only log serverHeader if there was header. Otherwise it can // be trailer only. - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), sh) + } } - binlog.Log(&binarylog.ServerTrailer{ + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), st) + } } return appErr } @@ -1329,6 +1374,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } opts := &transport.Options{Last: true} + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). @@ -1346,26 +1396,34 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerTrailer{ + } + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), st) + } } return err } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerMessage{ + } + sm := &binarylog.ServerMessage{ Message: reply, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), sm) + } } if channelz.IsOn() { t.IncrMsgSent() @@ -1376,14 +1434,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // TODO: Should we be logging if writing status failed here, like above? // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? 
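Both processUnaryRPC and processStreamingRPC now run non-status application errors through status.FromContextError, so a handler that returns a plain context error surfaces as Canceled or DeadlineExceeded rather than Unknown. A quick check of that mapping:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"google.golang.org/grpc/status"
)

func main() {
	fmt.Println(status.FromContextError(context.DeadlineExceeded).Code()) // DeadlineExceeded
	fmt.Println(status.FromContextError(context.Canceled).Code())         // Canceled
	fmt.Println(status.FromContextError(errors.New("boom")).Code())       // Unknown
}
```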
- err = t.WriteStatus(stream, statusOK) - if binlog != nil { - binlog.Log(&binarylog.ServerTrailer{ + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), st) + } } - return err + return t.WriteStatus(stream, statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1409,16 +1469,16 @@ func chainStreamServerInterceptors(s *Server) { func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - var i int - var next StreamHandler - next = func(srv interface{}, ss ServerStream) error { - if i == len(interceptors)-1 { - return interceptors[i](srv, ss, info, handler) - } - i++ - return interceptors[i-1](srv, ss, info, next) - } - return next(srv, ss) + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv interface{}, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1426,16 +1486,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if channelz.IsOn() { s.incrCallsStarted() } - sh := s.opts.statsHandler + shs := s.opts.statsHandlers var statsBegin *stats.Begin - if sh != nil { + if len(shs) != 0 { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - sh.HandleRPC(stream.Context(), statsBegin) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), statsBegin) + } } ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ @@ -1447,10 +1509,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: sh, + statsHandler: shs, } - if sh != nil || trInfo != nil || channelz.IsOn() { + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. 
defer func() { if trInfo != nil { @@ -1464,7 +1526,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } - if sh != nil { + if len(shs) != 0 { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1472,7 +1534,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), end) + } } if channelz.IsOn() { @@ -1485,8 +1549,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp }() } - ss.binlog = binarylog.GetMethodLogger(stream.Method()) - if ss.binlog != nil { + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1505,7 +1576,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if peer, ok := peer.FromContext(ss.Context()); ok { logEntry.PeerAddr = peer.Addr } - ss.binlog.Log(logEntry) + for _, binlog := range ss.binlogs { + binlog.Log(stream.Context(), logEntry) + } } // If dc is set and matches the stream's compression, use it. Otherwise, try @@ -1527,12 +1600,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { ss.cp = s.opts.cp - stream.SetSendCompress(s.opts.cp.Type()) + ss.sendCompressorName = s.opts.cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. ss.comp = encoding.GetCompressor(rc) if ss.comp != nil { - stream.SetSendCompress(rc) + ss.sendCompressorName = rc + } + } + + if ss.sendCompressorName != "" { + if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } @@ -1559,7 +1638,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { @@ -1568,13 +1649,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.SetError() ss.mu.Unlock() } - t.WriteStatus(ss.s, appStatus) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(stream.Context(), st) + } } + t.WriteStatus(ss.s, appStatus) // TODO: Should we log an error from WriteStatus here and below? 
return appErr } @@ -1583,14 +1667,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.mu.Unlock() } - err = t.WriteStatus(ss.s, statusOK) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(stream.Context(), st) + } } - return err + return t.WriteStatus(ss.s, statusOK) } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { @@ -1664,7 +1750,7 @@ type streamKey struct{} // NewContextWithServerTransportStream creates a new context from ctx and // attaches stream to it. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1679,7 +1765,7 @@ func NewContextWithServerTransportStream(ctx context.Context, stream ServerTrans // // See also NewContextWithServerTransportStream. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1694,7 +1780,7 @@ type ServerTransportStream interface { // ctx. Returns nil if the given context has no stream associated with it // (which implies it is not an RPC invocation context). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1716,11 +1802,7 @@ func (s *Server) Stop() { s.done.Fire() }() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() listeners := s.lis @@ -1736,7 +1818,7 @@ func (s *Server) Stop() { } for _, cs := range conns { for st := range cs { - st.Close() + st.Close(errors.New("Server.Stop called")) } } if s.opts.numServerWorkers > 0 { @@ -1758,11 +1840,7 @@ func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() @@ -1776,7 +1854,7 @@ func (s *Server) GracefulStop() { if !s.drain { for _, conns := range s.conns { for st := range conns { - st.Drain() + st.Drain("graceful_stop") } } s.drain = true @@ -1815,12 +1893,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. 
For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1832,8 +1924,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1845,8 +1943,66 @@ func SendHeader(ctx context.Context, md metadata.MD) error { return nil } +// SetSendCompressor sets a compressor for outbound messages from the server. +// It must not be called after any event that causes headers to be sent +// (see ServerStream.SetHeader for the complete list). Provided compressor is +// used when below conditions are met: +// +// - compressor is registered via encoding.RegisterCompressor +// - compressor name must exist in the client advertised compressor names +// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to +// get client supported compressor names. +// +// The context provided must be the context passed to the server's handler. +// It must be noted that compressor name encoding.Identity disables the +// outbound compression. +// By default, server messages will be sent using the same compressor with +// which request messages were sent. +// +// It is not safe to call SetSendCompressor concurrently with SendHeader and +// SendMsg. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func SetSendCompressor(ctx context.Context, name string) error { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return fmt.Errorf("failed to fetch the stream from the given context") + } + + if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil { + return fmt.Errorf("unable to set send compressor: %w", err) + } + + return stream.SetSendCompress(name) +} + +// ClientSupportedCompressors returns compressor names advertised by the client +// via grpc-accept-encoding header. +// +// The context provided must be the context passed to the server's handler. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. 
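SetSendCompressor and ClientSupportedCompressors (whose body follows just below) give handlers control over the response encoding. A hedged sketch of a unary handler opting into gzip only when the client advertised it; the handler and pb types are placeholders, and the blank import of google.golang.org/grpc/encoding/gzip is what registers the "gzip" compressor.

import (
	"context"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

func (s *echoServer) Echo(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) {
	if names, err := grpc.ClientSupportedCompressors(ctx); err == nil {
		for _, name := range names {
			if name == "gzip" {
				// Must be called before headers are sent; an error means the
				// name is not registered or not accepted by the client.
				_ = grpc.SetSendCompressor(ctx, "gzip")
				break
			}
		}
	}
	return &pb.EchoResponse{Message: req.GetMessage()}, nil
}
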
+func ClientSupportedCompressors(ctx context.Context) ([]string, error) { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) + } + + return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil +} + // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1875,3 +2031,51 @@ type channelzServer struct { func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { return c.s.channelzMetric() } + +// validateSendCompressor returns an error when given compressor name cannot be +// handled by the server or the client based on the advertised compressors. +func validateSendCompressor(name, clientCompressors string) error { + if name == encoding.Identity { + return nil + } + + if !grpcutil.IsCompressorNameRegistered(name) { + return fmt.Errorf("compressor not registered %q", name) + } + + for _, c := range strings.Split(clientCompressors, ",") { + if c == name { + return nil // found match + } + } + return fmt.Errorf("client does not support compressor %q", name) +} + +// atomicSemaphore implements a blocking, counting semaphore. acquire should be +// called synchronously; release may be called asynchronously. +type atomicSemaphore struct { + n int64 + wait chan struct{} +} + +func (q *atomicSemaphore) acquire() { + if atomic.AddInt64(&q.n, -1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +} + +func (q *atomicSemaphore) release() { + // N.B. the "<= 0" check below should allow for this to work with multiple + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. + if atomic.AddInt64(&q.n, 1) <= 0 { + // An acquire was waiting on us. Unblock it. + q.wait <- struct{}{} + } +} + +func newHandlerQuota(n uint32) *atomicSemaphore { + return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)} +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 22c4240cf..0df11fc09 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -23,8 +23,6 @@ import ( "errors" "fmt" "reflect" - "strconv" - "strings" "time" "google.golang.org/grpc/codes" @@ -57,10 +55,9 @@ type lbConfig struct { type ServiceConfig struct { serviceconfig.Config - // LB is the load balancer the service providers recommends. The balancer - // specified via grpc.WithBalancerName will override this. This is deprecated; - // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig - // will be used. + // LB is the load balancer the service providers recommends. This is + // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, + // lbConfig will be used. LB *string // lbConfig is the service config's load balancing configuration. 
If @@ -107,8 +104,8 @@ type healthCheckConfig struct { type jsonRetryPolicy struct { MaxAttempts int - InitialBackoff string - MaxBackoff string + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration BackoffMultiplier float64 RetryableStatusCodes []codes.Code } @@ -130,50 +127,6 @@ type retryThrottlingPolicy struct { TokenRatio float64 } -func parseDuration(s *string) (*time.Duration, error) { - if s == nil { - return nil, nil - } - if !strings.HasSuffix(*s, "s") { - return nil, fmt.Errorf("malformed duration %q", *s) - } - ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) - if len(ss) > 2 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. - hasDigits := false - var d time.Duration - if len(ss[0]) > 0 { - i, err := strconv.ParseInt(ss[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - d = time.Duration(i) * time.Second - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - f, err := strconv.ParseInt(ss[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - for i := 9; i > len(ss[1]); i-- { - f *= 10 - } - d += time.Duration(f) - hasDigits = true - } - if !hasDigits { - return nil, fmt.Errorf("malformed duration %q", *s) - } - - return &d, nil -} - type jsonName struct { Service string Method string @@ -202,7 +155,7 @@ func (j jsonName) generatePath() (string, error) { type jsonMC struct { Name *[]jsonName WaitForReady *bool - Timeout *string + Timeout *internalserviceconfig.Duration MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy @@ -218,7 +171,7 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfigForTesting = parseServiceConfig + internal.ParseServiceConfig = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { @@ -227,7 +180,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -253,18 +206,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { if m.Name == nil { continue } - d, err := parseDuration(m.Timeout) - if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return &serviceconfig.ParseResult{Err: err} - } mc := MethodConfig{ WaitForReady: m.WaitForReady, - Timeout: d, + Timeout: (*time.Duration)(m.Timeout), } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -284,13 +232,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { for i, n := range *m.Name { path, err := n.generatePath() if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to 
methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } if _, ok := paths[path]; ok { err = errDuplicatedName - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } paths[path] = struct{}{} @@ -313,18 +261,10 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol if jrp == nil { return nil, nil } - ib, err := parseDuration(&jrp.InitialBackoff) - if err != nil { - return nil, err - } - mb, err := parseDuration(&jrp.MaxBackoff) - if err != nil { - return nil, err - } if jrp.MaxAttempts <= 1 || - *ib <= 0 || - *mb <= 0 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) @@ -333,8 +273,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol rp := &internalserviceconfig.RetryPolicy{ MaxAttempts: jrp.MaxAttempts, - InitialBackoff: *ib, - MaxBackoff: *mb, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } @@ -381,6 +321,9 @@ func init() { // // If any of them is NOT *ServiceConfig, return false. func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go index 73a2f9266..35e7a20a0 100644 --- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -19,7 +19,7 @@ // Package serviceconfig defines types and methods for operating on gRPC // service configs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index a5ebeeb69..7a552a9b7 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -36,12 +36,12 @@ type RPCStats interface { IsClient() bool } -// Begin contains stats when an RPC begins. +// Begin contains stats when an RPC attempt begins. // FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. Client bool - // BeginTime is the time when the RPC begins. + // BeginTime is the time when the RPC attempt begins. BeginTime time.Time // FailFast indicates if this RPC is failfast. FailFast bool @@ -49,6 +49,9 @@ type Begin struct { IsClientStream bool // IsServerStream indicates whether the RPC is a server streaming RPC. IsServerStream bool + // IsTransparentRetryAttempt indicates whether this attempt was initiated + // due to transparently retrying a previous attempt. + IsTransparentRetryAttempt bool } // IsClient indicates if the stats information is from client side. @@ -64,10 +67,18 @@ type InPayload struct { Payload interface{} // Data is the serialized message payload. Data []byte - // Length is the length of uncompressed data. + + // Length is the size of the uncompressed payload data. 
Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int + // RecvTime is the time when the payload is received. RecvTime time.Time } @@ -126,9 +137,15 @@ type OutPayload struct { Payload interface{} // Data is the serialized message payload. Data []byte - // Length is the length of uncompressed data. + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int // SentTime is the time when the payload is sent. SentTime time.Time diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 54d187186..bcf2e4d81 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -29,6 +29,7 @@ package status import ( "context" + "errors" "fmt" spb "google.golang.org/genproto/googleapis/rpc/status" @@ -73,19 +74,52 @@ func FromProto(s *spb.Status) *Status { return status.FromProto(s) } -// FromError returns a Status representing err if it was produced by this -// package or has a method `GRPCStatus() *Status`. -// If err is nil, a Status is returned with codes.OK and no message. -// Otherwise, ok is false and a Status is returned with codes.Unknown and -// the original error message. +// FromError returns a Status representation of err. +// +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type +// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped +// errors, the message returned contains the entire err.Error() text and not +// just the wrapped status. In that case, ok is true. +// +// - If err is nil, a Status is returned with codes.OK and no message, and ok +// is true. +// +// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` +// returns nil (which maps to Codes.OK), or if err wraps a type +// satisfying this, a Status is returned with codes.Unknown and err's +// Error() message, and ok is false. +// +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus(), true + type grpcstatus interface{ GRPCStatus() *Status } + if gs, ok := err.(grpcstatus); ok { + if gs.GRPCStatus() == nil { + // Error has status nil, which maps to codes.OK. There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. 
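Because FromError now unwraps with errors.As (the rest of that branch continues below) and Code, a little further down, delegates to Convert, wrapping a status error with fmt.Errorf and %w no longer hides its code. A small self-contained sketch:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// An ordinary wrapped status error, as produced by fmt.Errorf("...: %w", ...).
	wrapped := fmt.Errorf("calling backend: %w", status.Error(codes.NotFound, "no such user"))

	// The wrapped Status is found via errors.As; its message carries the full
	// err.Error() text of the wrapper, not just the inner status message.
	st, ok := status.FromError(wrapped)
	fmt.Println(ok, st.Code()) // true NotFound

	// Code unwraps too, so callers no longer need to unwrap manually.
	fmt.Println(status.Code(wrapped)) // NotFound
}
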
+ return New(codes.Unknown, err.Error()), false + } + return gs.GRPCStatus(), true + } + var gs grpcstatus + if errors.As(err, &gs) { + if gs.GRPCStatus() == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. + return New(codes.Unknown, err.Error()), false + } + p := gs.GRPCStatus().Proto() + p.Message = err.Error() + return status.FromProto(p), true } return New(codes.Unknown, err.Error()), false } @@ -97,33 +131,30 @@ func Convert(err error) *Status { return s } -// Code returns the Code of the error if it is a Status error, codes.OK if err -// is nil, or codes.Unknown otherwise. +// Code returns the Code of the error if it is a Status error or if it wraps a +// Status error. If that is not the case, it returns codes.OK if err is nil, or +// codes.Unknown otherwise. func Code(err error) codes.Code { // Don't use FromError to avoid allocation of OK status. if err == nil { return codes.OK } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus().Code() - } - return codes.Unknown + + return Convert(err).Code() } -// FromContextError converts a context error into a Status. It returns a -// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is -// non-nil and not a context error. +// FromContextError converts a context error or wrapped context error into a +// Status. It returns a Status with codes.OK if err is nil, or a Status with +// codes.Unknown if err is non-nil and not a context error. func FromContextError(err error) *Status { - switch err { - case nil: + if err == nil { return nil - case context.DeadlineExceeded: + } + if errors.Is(err, context.DeadlineExceeded) { return New(codes.DeadlineExceeded, err.Error()) - case context.Canceled: + } + if errors.Is(err, context.Canceled) { return New(codes.Canceled, err.Error()) - default: - return New(codes.Unknown, err.Error()) } + return New(codes.Unknown, err.Error()) } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index e224af12d..10092685b 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -36,8 +36,10 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -46,10 +48,12 @@ import ( ) // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. 
Used @@ -119,6 +123,9 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On @@ -137,17 +144,22 @@ type ClientStream interface { // To ensure resources are not leaked due to the stream returned, one of the following // actions must be performed: // -// 1. Call Close on the ClientConn. -// 2. Cancel the context provided. -// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated -// client-streaming RPC, for instance, might use the helper function -// CloseAndRecv (note that CloseSend does not Recv, therefore is not -// guaranteed to release all resources). -// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. // // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + if err := cc.idlenessMgr.onCallBegin(); err != nil { + return nil, err + } + defer cc.idlenessMgr.onCallEnd() + // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -164,6 +176,20 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + // validate md + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + // validate added + for _, kvs := range added { + for i := 0; i < len(kvs); i += 2 { + if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -187,6 +213,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. 
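The cleanup rules restated above for NewStream are easiest to see on a client-streaming RPC. A sketch against a hypothetical generated client (pb.UploaderClient, pb.Chunk and the upload summary type are placeholders); rule 3 is satisfied by CloseAndRecv, which half-closes and then receives until the final status:

func upload(ctx context.Context, c pb.UploaderClient, chunks [][]byte) error {
	stream, err := c.Upload(ctx)
	if err != nil {
		return err
	}
	for _, chunk := range chunks {
		if err := stream.Send(&pb.Chunk{Data: chunk}); err != nil {
			// Send failing (often with io.EOF) means the stream is aborted;
			// the real status is still retrieved below, releasing the stream.
			break
		}
	}
	// CloseSend alone would not be enough: without a Recv, the stream's
	// goroutine and context can leak if none of rules 1-4 apply.
	_, err = stream.CloseAndRecv()
	return err
}
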
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } return nil, toRPCErr(err) } @@ -274,35 +307,6 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client if c.creds != nil { callHdr.Creds = c.creds } - var trInfo *traceInfo - if EnableTracing { - trInfo = &traceInfo{ - tr: trace.New("grpc.Sent."+methodFamily(method), method), - firstLine: firstLine{ - client: true, - }, - } - if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = time.Until(deadline) - } - trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) - } - ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) - sh := cc.dopts.copts.StatsHandler - var beginTime time.Time - if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) - beginTime = time.Now() - begin := &stats.Begin{ - Client: true, - BeginTime: beginTime, - FailFast: c.failFast, - IsClientStream: desc.ClientStreams, - IsServerStream: desc.ServerStreams, - } - sh.HandleRPC(ctx, begin) - } cs := &clientStream{ callHdr: callHdr, @@ -316,29 +320,41 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cp: cp, comp: comp, cancel: cancel, - beginTime: beginTime, firstAttempt: true, onCommit: onCommit, } if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) } - cs.binlog = binarylog.GetMethodLogger(method) - - // Only this initial attempt has stats/tracing. - // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. - if err := cs.newAttemptLocked(sh, trInfo); err != nil { - cs.finish(err) - return nil, err + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } } - op := func(a *csAttempt) error { return a.newStream() } + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. + op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) return nil, err } - if cs.binlog != nil { + if len(cs.binlogs) != 0 { md, _ := metadata.FromOutgoingContext(ctx) logEntry := &binarylog.ClientHeader{ OnClientSide: true, @@ -352,7 +368,9 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client logEntry.Timeout = 0 } } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } if desc != unaryStreamDesc { @@ -373,60 +391,123 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. 
-func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { - newAttempt := &csAttempt{ - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, +// newAttemptLocked creates a new csAttempt without a transport or stream. +func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. - newAttempt.finish(retErr) + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + method := cs.callHdr.Method + var beginTime time.Time + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: cs.callInfo.failFast, + IsClientStream: cs.desc.ClientStreams, + IsServerStream: cs.desc.ServerStreams, + IsTransparentRetryAttempt: isTransparent, } - }() + sh.HandleRPC(ctx, begin) + } - if err := cs.ctx.Err(); err != nil { - return toRPCErr(err) + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) } - ctx := cs.ctx - if cs.cc.parsedTarget.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. - ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs( + ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } - t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries - s, err := a.t.NewStream(cs.ctx, cs.callHdr) + + // Merge metadata stored in PickResult, if any, with existing call metadata. + // It is safe to overwrite the csAttempt's context here, since all state + // maintained in it are local to the attempt. When the attempt has to be + // retried, a new instance of csAttempt will be created. + if a.pickResult.Metadata != nil { + // We currently do not have a function it the metadata package which + // merges given metadata with existing metadata in a context. 
Existing + // function `AppendToOutgoingContext()` takes a variadic argument of key + // value pairs. + // + // TODO: Make it possible to retrieve key value pairs from metadata.MD + // in a form passable to AppendToOutgoingContext(), or create a version + // of AppendToOutgoingContext() that accepts a metadata.MD. + md, _ := metadata.FromOutgoingContext(a.ctx) + md = metadata.Join(md, a.pickResult.Metadata) + a.ctx = metadata.NewOutgoingContext(a.ctx, md) + } + + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - // Return without converting to an RPC error so retry code can - // inspect. - return err + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. + return err + } + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.p = &parser{r: s} return nil } @@ -444,8 +525,7 @@ type clientStream struct { cancel context.CancelFunc // cancels all attempts - sentLast bool // sent an end stream - beginTime time.Time + sentLast bool // sent an end stream methodConfig *MethodConfig @@ -453,7 +533,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlogs []binarylog.MethodLogger // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -485,11 +565,12 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - done func(balancer.DoneInfo) + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + pickResult balancer.PickResult finished bool dc Decompressor @@ -502,7 +583,13 @@ type csAttempt struct { // and cleared when the finish method is called. trInfo *traceInfo - statsHandler stats.Handler + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { @@ -520,95 +607,76 @@ func (cs *clientStream) commitAttempt() { } // shouldRetry returns nil if the RPC should be retried; otherwise it returns -// the error that should be returned by the operation. -func (cs *clientStream) shouldRetry(err error) error { - if cs.attempt.s == nil { - // Error from NewClientStream. - nse, ok := err.(*transport.NewStreamError) - if !ok { - // Unexpected, but assume no I/O was performed and the RPC is not - // fatal, so retry indefinitely. - return nil - } - - // Unwrap and convert error. - err = toRPCErr(nse.Err) - - // Never retry DoNotRetry errors, which indicate the RPC should not be - // retried due to max header list size violation, etc. - if nse.DoNotRetry { - return err - } +// the error that should be returned by the operation. If the RPC should be +// retried, the bool indicates whether it is being retried transparently. +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs - // In the event of a non-IO operation error from NewStream, we never - // attempted to write anything to the wire, so we can retry - // indefinitely. 
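Each attempt now carries its own context, begin time and stats handlers, and stats.Begin reports IsTransparentRetryAttempt. A sketch of a client-side stats.Handler that counts transparent retries; the counter itself is illustrative, but stats.Handler and grpc.WithStatsHandler are the real APIs, and with the slice-of-handlers change above several handlers can be installed side by side.

import (
	"context"
	"sync/atomic"

	"google.golang.org/grpc/stats"
)

type retryCounter struct {
	transparentRetries int64
}

func (h *retryCounter) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

func (h *retryCounter) HandleRPC(_ context.Context, s stats.RPCStats) {
	if b, ok := s.(*stats.Begin); ok && b.IsTransparentRetryAttempt {
		atomic.AddInt64(&h.transparentRetries, 1)
	}
}

func (h *retryCounter) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (h *retryCounter) HandleConn(context.Context, stats.ConnStats)                       {}

// Installed on a client with grpc.WithStatsHandler(&retryCounter{}); every
// attempt, including transparent retries, is reported through HandleRPC.
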
- if !nse.PerformedIO { - return nil - } + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. + return false, err } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. - return err + if a.s == nil && a.allowTransparentRetry { + return true, nil } // Wait for the trailers. unprocessed := false - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - unprocessed = cs.attempt.s.Unprocessed() + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. - return nil + return true, nil } if cs.cc.dopts.disableRetry { - return err + return false, err } pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if !cs.attempt.s.TrailersOnly() { - return err + if a.s != nil { + if !a.s.TrailersOnly() { + return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. - sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } hasPushback = true } else if len(sps) > 1 { channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } rp := cs.methodConfig.RetryPolicy if rp == nil || !rp.RetryableStatusCodes[code] { - return err + return false, err } // Note: the ordering here is important; we count this as a failure // only if the code matched a retryable code. if cs.retryThrottler.throttle() { - return err + return false, err } if cs.numRetries+1 >= rp.MaxAttempts { - return err + return false, err } var dur time.Duration @@ -631,26 +699,32 @@ func (cs *clientStream) shouldRetry(err error) error { select { case <-t.C: cs.numRetries++ - return nil + return false, nil case <-cs.ctx.Done(): t.Stop() - return status.FromContextError(cs.ctx.Err()).Err() + return false, status.FromContextError(cs.ctx.Err()).Err() } } // Returns nil if a retry was performed and succeeded; error otherwise. -func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(toRPCErr(lastErr)) - if err := cs.shouldRetry(lastErr); err != nil { + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) + if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(nil, nil); err != nil { + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. 
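None of the retry machinery above (other than transparent retries) runs unless a retry policy is present in the method config; with the service_config.go changes earlier in this diff, the backoff values are parsed as JSON durations such as "0.1s". A sketch of enabling a policy at dial time; the target and service name are placeholders, and insecure credentials are used only to keep it short.

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialWithRetries() (*grpc.ClientConn, error) {
	return grpc.Dial(
		"dns:///backend.example.invalid:443", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{
		  "methodConfig": [{
		    "name": [{"service": "echo.Echo"}],
		    "retryPolicy": {
		      "maxAttempts": 4,
		      "initialBackoff": "0.1s",
		      "maxBackoff": "1s",
		      "backoffMultiplier": 2.0,
		      "retryableStatusCodes": ["UNAVAILABLE"]
		    }
		  }]
		}`),
	)
}
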
+ if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -660,7 +734,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. - return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -674,6 +751,18 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. return toRPCErr(op(cs.attempt)) } + if len(cs.buffer) == 0 { + // For the first op, which controls creation of the stream and + // assigns cs.attempt, we need to create a new attempt inline + // before executing the first op. On subsequent ops, the attempt + // is created immediately before replaying the ops. + var err error + if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.mu.Unlock() + cs.finish(err) + return err + } + } a := cs.attempt cs.mu.Unlock() err := op(a) @@ -690,7 +779,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -699,17 +788,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD + noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() + if err == transport.ErrNoHeaders { + noHeader = true + return nil + } return toRPCErr(err) }, cs.commitAttemptLocked) + if err != nil { cs.finish(err) return nil, err } - if cs.binlog != nil && !cs.serverHeaderBinlogged { - // Only log if binary log is on and header has not been logged. + + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + // Only log if binary log is on and header has not been logged, and + // there is actually headers to log. logEntry := &binarylog.ServerHeader{ OnClientSide: true, Header: m, @@ -718,10 +815,12 @@ func (cs *clientStream) Header() (metadata.MD, error) { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } - return m, err + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -739,10 +838,9 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { for _, f := range cs.buffer { - if err := f(a); err != nil { + if err := f(attempt); err != nil { return err } } @@ -790,47 +888,48 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. 
- m, data = nil, nil - return err + return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ClientMessage{ + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, - }) + Message: data, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, cm) + } } - return + return err } func (cs *clientStream) RecvMsg(m interface{}) error { - if cs.binlog != nil && !cs.serverHeaderBinlogged { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() } var recvInfo *payloadInfo - if cs.binlog != nil { + if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) }, cs.commitAttemptLocked) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ServerMessage{ + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ OnClientSide: true, Message: recvInfo.uncompressedBytes, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, sm) + } } if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - if cs.binlog != nil { + if len(cs.binlogs) != 0 { // finish will not log Trailer. Log Trailer here. logEntry := &binarylog.ServerTrailer{ OnClientSide: true, @@ -843,7 +942,9 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } } return err @@ -864,10 +965,13 @@ func (cs *clientStream) CloseSend() error { return nil } cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) - if cs.binlog != nil { - cs.binlog.Log(&binarylog.ClientHalfClose{ + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, chc) + } } // We never returned an error here for reasons. return nil @@ -884,6 +988,9 @@ func (cs *clientStream) finish(err error) { return } cs.finished = true + for _, onFinish := range cs.callInfo.onFinish { + onFinish(err) + } cs.commitAttemptLocked() if cs.attempt != nil { cs.attempt.finish(err) @@ -900,10 +1007,13 @@ func (cs *clientStream) finish(err error) { // // Only one of cancel or trailer needs to be logged. In the cases where // users don't call RecvMsg, users must have already canceled the RPC. 
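The onFinish hooks invoked in finish above back a per-call option on the client side; the constructor is assumed here to be grpc.OnFinish (experimental), so verify the name against the grpc version in use. The client and pb types are placeholders.

func callWithFinishHook(ctx context.Context, client pb.EchoClient) (*pb.EchoResponse, error) {
	// Assumption: grpc.OnFinish is the public constructor for the onFinish
	// hooks seen in finish() above; it is an experimental call option.
	return client.Echo(ctx, &pb.EchoRequest{Message: "hi"},
		grpc.OnFinish(func(err error) {
			// Runs once, after the RPC (including any retries) has finished.
			log.Printf("rpc finished: %v", err)
		}),
	)
}
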
- if cs.binlog != nil && status.Code(err) == codes.Canceled { - cs.binlog.Log(&binarylog.Cancel{ + if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { + c := &binarylog.Cancel{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } } if err == nil { cs.retryThrottler.successfulRPC() @@ -936,8 +1046,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { } return io.EOF } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -947,7 +1057,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { cs := a.cs - if a.statsHandler != nil && payInfo == nil { + if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} } @@ -975,6 +1085,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } return io.EOF // indicates successful end of stream. } + return toRPCErr(err) } if a.trInfo != nil { @@ -984,15 +1095,16 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Length: len(payInfo.uncompressedBytes), }) } if channelz.IsOn() { @@ -1031,12 +1143,12 @@ func (a *csAttempt) finish(err error) { tr = a.s.Trailer() } - if a.done != nil { + if a.pickResult.Done != nil { br := false if a.s != nil { br = a.s.BytesReceived() } - a.done(balancer.DoneInfo{ + a.pickResult.Done(balancer.DoneInfo{ Err: err, Trailer: tr, BytesSent: a.s != nil, @@ -1044,15 +1156,15 @@ func (a *csAttempt) finish(err error) { ServerLoad: balancerload.Parse(tr), }) } - if a.statsHandler != nil { + for _, sh := range a.statsHandlers { end := &stats.End{ Client: true, - BeginTime: a.cs.beginTime, + BeginTime: a.beginTime, EndTime: time.Now(), Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.cs.ctx, end) + sh.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1161,14 +1273,19 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin as.p = &parser{r: s} ac.incrCallsStarted() if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. + // Listen on stream context to cleanup when the stream context is + // canceled. Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. 
In all + // other cases, an error should already be injected into the recv + // buffer by the transport, which the client will eventually receive, + // and then we will cancel the stream's context in + // addrConnStream.finish. go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() select { - case <-ac.ctx.Done(): + case <-acCtx.Done(): as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) case <-ctx.Done(): as.finish(toRPCErr(ctx.Err())) @@ -1357,8 +1474,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. @@ -1390,6 +1509,9 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On @@ -1415,13 +1537,15 @@ type serverStream struct { comp encoding.Compressor decomp encoding.Compressor + sendCompressorName string + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo - statsHandler stats.Handler + statsHandler []stats.Handler - binlog *binarylog.MethodLogger + binlogs []binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). @@ -1441,17 +1565,29 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) - if ss.binlog != nil && !ss.serverHeaderBinlogged { + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } } return err } @@ -1460,6 +1596,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } @@ -1492,6 +1631,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } }() + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. 
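With metadata validation added to SetHeader, SendHeader and SetTrailer above, a typical server-streaming handler is unchanged, but malformed keys or values now fail with codes.Internal before reaching the wire (SetTrailer can only log, since it returns nothing). A sketch; the service, pb types and pb.Echo_StreamServer stream interface are hypothetical, and google.golang.org/grpc/metadata is assumed imported.

func (s *echoServer) Stream(req *pb.EchoRequest, stream pb.Echo_StreamServer) error {
	// Validated here; flushed by the first Send (or an explicit SendHeader)
	// and immutable afterwards.
	if err := stream.SetHeader(metadata.Pairs("request-id", "r-123")); err != nil {
		return err
	}
	for i := 0; i < 3; i++ {
		if err := stream.Send(&pb.EchoResponse{Message: req.GetMessage()}); err != nil {
			return err
		}
	}
	// Also validated; invalid trailer metadata is logged rather than returned.
	stream.SetTrailer(metadata.Pairs("elapsed-ms", "42"))
	return nil
}
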
+ if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { + ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.sendCompressorName = sendCompressorsName + } + // load hdr, payload, data hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) if err != nil { @@ -1505,20 +1651,28 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if ss.binlog != nil { + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } } - ss.binlog.Log(&binarylog.ServerMessage{ + sm := &binarylog.ServerMessage{ Message: data, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sm) + } } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } } return nil } @@ -1552,13 +1706,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } }() var payInfo *payloadInfo - if ss.statsHandler != nil || ss.binlog != nil { + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} } if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { if err == io.EOF { - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientHalfClose{}) + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, chc) + } } return err } @@ -1567,20 +1724,26 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return toRPCErr(err) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), - }) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + Length: len(payInfo.uncompressedBytes), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + }) + } } - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientMessage{ + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: payInfo.uncompressedBytes, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, cm) + } } return nil } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index dbf34e6bb..bfa5dfa40 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -19,7 +19,7 @@ // Package tap defines the function handles which are executed on the transport // layer of gRPC-Go and related information. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
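The serverStream.SendMsg change above re-reads the stream's send-compressor name on every send, so a server handler can switch compression for an individual RPC. Below is a minimal sketch of what that enables; it assumes the experimental grpc.SetSendCompressor helper shipped in this grpc-go version and a hypothetical generated echopb package, and is not part of the vendored diff.

package echoserver

import (
	"io"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor by side effect

	echopb "example.com/echopb" // hypothetical generated stubs, for illustration only
)

type echoServer struct {
	echopb.UnimplementedEchoServer
}

func (s *echoServer) Stream(stream echopb.Echo_StreamServer) error {
	// Must run before the first Send: SendMsg re-reads the stream's
	// send-compressor name and swaps encoders when it changes.
	if err := grpc.SetSendCompressor(stream.Context(), "gzip"); err != nil {
		return err
	}
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			return nil // client finished sending
		}
		if err != nil {
			return err
		}
		if err := stream.Send(in); err != nil {
			return err
		}
	}
}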
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index e3510e10f..3cc754062 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.40.0" +const Version = "1.56.3" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 5eaa8b05d..a8e4732b3 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -41,16 +41,8 @@ if [[ "$1" = "-install" ]]; then github.com/client9/misspell/cmd/misspell popd if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${TRAVIS}" = "true" ]]; then - PROTOBUF_VERSION=3.14.0 - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/travis - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=3.14.0 + if [[ "${GITHUB_ACTIONS}" = "true" ]]; then + PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/runner/go wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} @@ -66,8 +58,20 @@ elif [[ "$#" -ne 0 ]]; then die "Unknown argument(s): $*" fi +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + make proto && git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +if [[ -n "${VET_ONLY_PROTO}" ]]; then + exit 0 +fi + # - Ensure all source files contain a copyright message. -not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' +# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) +(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. not grep 'func Test[^(]' *_test.go @@ -81,7 +85,7 @@ not git grep -l 'x/net/context' -- "*.go" git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' # - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' +git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" @@ -89,19 +93,8 @@ not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' -# - Check imports that are illegal in appengine (until Go 1.11). -# TODO: Remove when we drop Go 1.10 support -go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go - misspell -error . -# - Check that generated proto files are up to date. 
-if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - # - gofmt, goimports, golint (with exceptions for generated code), go vet, # go mod tidy. # Perform these checks on each module inside gRPC. @@ -111,9 +104,9 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" + golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy + go mod tidy -compat=1.17 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd @@ -123,8 +116,9 @@ done # # TODO(dfawley): don't use deprecated functions in examples or first-party # plugins. +# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. @@ -151,7 +145,6 @@ grpc.NewGZIPDecompressor grpc.RPCCompressor grpc.RPCDecompressor grpc.ServiceConfig -grpc.WithBalancerName grpc.WithCompressor grpc.WithDecompressor grpc.WithDialer diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go index 00ea2fecf..21d5d2cb1 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -4,7 +4,7 @@ // Package protojson marshals and unmarshals protocol buffer messages as JSON // format. It follows the guide at -// https://developers.google.com/protocol-buffers/docs/proto3#json. +// https://protobuf.dev/programming-guides/proto3#json. // // This package produces a different output than the standard "encoding/json" // package, which does not operate correctly on protocol buffer messages. diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index c85f84694..6c37d4174 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -814,16 +814,22 @@ func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { return d.unexpectedTokenError(tok) } - t, err := time.Parse(time.RFC3339Nano, tok.ParsedString()) + s := tok.ParsedString() + t, err := time.Parse(time.RFC3339Nano, s) if err != nil { return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) } - // Validate seconds. No need to validate nanos because time.Parse would have - // covered that already. + // Validate seconds. secs := t.Unix() if secs < minTimestampSeconds || secs > maxTimestampSeconds { return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) } + // Validate subseconds. 
+ i := strings.LastIndexByte(s, '.') // start of subsecond field + j := strings.LastIndexAny(s, "Z-+") // start of timezone field + if i >= 0 && j >= i && j-i > len(".999999999") { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } fds := m.Descriptor().Fields() fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index ce57f57eb..f4b4686cf 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package protowire parses and formats the raw wire encoding. -// See https://developers.google.com/protocol-buffers/docs/encoding. +// See https://protobuf.dev/programming-guides/encoding. // // For marshaling and unmarshaling entire protobuf messages, // use the "google.golang.org/protobuf/proto" package instead. @@ -29,12 +29,8 @@ const ( ) // IsValid reports whether the field number is semantically valid. -// -// Note that while numbers within the reserved range are semantically invalid, -// they are syntactically valid in the wire format. -// Implementations may treat records with reserved field numbers as unknown. func (n Number) IsValid() bool { - return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber + return MinValidNumber <= n && n <= MaxValidNumber } // Type represents the wire type. diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index b13fd29e8..d043a6ebe 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -294,7 +294,7 @@ func (d *Decoder) isValueNext() bool { } // consumeToken constructs a Token for given Kind with raw value derived from -// current d.in and given size, and consumes the given size-lenght of it. +// current d.in and given size, and consumes the given size-length of it. func (d *Decoder) consumeToken(kind Kind, size int) Token { tok := Token{ kind: kind, diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 427c62d03..87853e786 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -412,12 +412,13 @@ func (d *Decoder) parseFieldName() (tok Token, err error) { // Field number. Identify if input is a valid number that is not negative // and is decimal integer within 32-bit range. 
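The unmarshalTimestamp change above adds an explicit subsecond check, so JSON timestamps with more than nanosecond (nine-digit) precision are now rejected rather than accepted. A small sketch of the resulting behavior using only the public protojson and timestamppb APIs (not part of the vendored diff):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	var ts timestamppb.Timestamp
	ok := []byte(`"2023-01-02T03:04:05.123456789Z"`)   // 9 fractional digits: accepted
	bad := []byte(`"2023-01-02T03:04:05.1234567891Z"`) // 10 fractional digits: rejected
	fmt.Println(protojson.Unmarshal(ok, &ts))  // <nil>
	fmt.Println(protojson.Unmarshal(bad, &ts)) // error: invalid google.protobuf.Timestamp value
}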
if num := parseNumber(d.in); num.size > 0 { + str := num.string(d.in) if !num.neg && num.kind == numDec { - if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil { + if _, err := strconv.ParseInt(str, 10, 32); err == nil { return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil } } - return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) + return Token{}, d.newSyntaxError("invalid field number: %s", str) } return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in)) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go index 81a5d8c86..45c81f029 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -15,17 +15,12 @@ func (d *Decoder) parseNumberValue() (Token, bool) { if num.neg { numAttrs |= isNegative } - strSize := num.size - last := num.size - 1 - if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') { - strSize = last - } tok := Token{ kind: Scalar, attrs: numberValue, pos: len(d.orig) - len(d.in), raw: d.in[:num.size], - str: string(d.in[:strSize]), + str: num.string(d.in), numAttrs: numAttrs, } d.consume(num.size) @@ -46,6 +41,27 @@ type number struct { kind uint8 neg bool size int + // if neg, this is the length of whitespace and comments between + // the minus sign and the rest fo the number literal + sep int +} + +func (num number) string(data []byte) string { + strSize := num.size + last := num.size - 1 + if num.kind == numFloat && (data[last] == 'f' || data[last] == 'F') { + strSize = last + } + if num.neg && num.sep > 0 { + // strip whitespace/comments between negative sign and the rest + strLen := strSize - num.sep + str := make([]byte, strLen) + str[0] = data[0] + copy(str[1:], data[num.sep+1:strSize]) + return string(str) + } + return string(data[:strSize]) + } // parseNumber constructs a number object from given input. It allows for the @@ -67,19 +83,22 @@ func parseNumber(input []byte) number { } // Optional - + var sep int if s[0] == '-' { neg = true s = s[1:] size++ + // Consume any whitespace or comments between the + // negative sign and the rest of the number + lenBefore := len(s) + s = consume(s, 0) + sep = lenBefore - len(s) + size += sep if len(s) == 0 { return number{} } } - // C++ allows for whitespace and comments in between the negative sign and - // the rest of the number. This logic currently does not but is consistent - // with v1. 
- switch { case s[0] == '0': if len(s) > 1 { @@ -116,7 +135,7 @@ func parseNumber(input []byte) number { if len(s) > 0 && !isDelim(s[0]) { return number{} } - return number{kind: kind, neg: neg, size: size} + return number{kind: kind, neg: neg, size: size, sep: sep} } } s = s[1:] @@ -188,5 +207,5 @@ func parseNumber(input []byte) number { return number{} } - return number{kind: kind, neg: neg, size: size} + return number{kind: kind, neg: neg, size: size, sep: sep} } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index e3cdf1c20..5c0e8f73f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -50,6 +50,7 @@ const ( FileDescriptorProto_Options_field_name protoreflect.Name = "options" FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + FileDescriptorProto_Edition_field_name protoreflect.Name = "edition" FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" @@ -63,6 +64,7 @@ const ( FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" + FileDescriptorProto_Edition_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.edition" ) // Field numbers for google.protobuf.FileDescriptorProto. @@ -79,6 +81,7 @@ const ( FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13 ) // Names for google.protobuf.DescriptorProto. @@ -494,26 +497,29 @@ const ( // Field names for google.protobuf.MessageOptions. 
const ( - MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" - MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" - MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" - MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" - MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" - MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" - MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" - MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" - MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) // Field numbers for google.protobuf.MessageOptions. const ( - MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 - MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 - MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 - MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) // Names for google.protobuf.FieldOptions. 
@@ -528,16 +534,24 @@ const ( FieldOptions_Packed_field_name protoreflect.Name = "packed" FieldOptions_Jstype_field_name protoreflect.Name = "jstype" FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_UnverifiedLazy_field_name protoreflect.Name = "unverified_lazy" FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + FieldOptions_Retention_field_name protoreflect.Name = "retention" + FieldOptions_Target_field_name protoreflect.Name = "target" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" + FieldOptions_UnverifiedLazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.unverified_lazy" FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" + FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" + FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -547,8 +561,12 @@ const ( FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_UnverifiedLazy_field_number protoreflect.FieldNumber = 15 FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 + FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 + FieldOptions_Target_field_number protoreflect.FieldNumber = 18 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -564,6 +582,18 @@ const ( FieldOptions_JSType_enum_name = "JSType" ) +// Full and short names for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" + FieldOptions_OptionRetention_enum_name = "OptionRetention" +) + +// Full and short names for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" + FieldOptions_OptionTargetType_enum_name = "OptionTargetType" +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -590,20 +620,23 @@ const ( // Field names for google.protobuf.EnumOptions. 
const ( - EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" - EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" - EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" - EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" - EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumOptions. const ( - EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 - EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) // Names for google.protobuf.EnumValueOptions. @@ -813,11 +846,13 @@ const ( GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + GeneratedCodeInfo_Annotation_Semantic_field_name protoreflect.Name = "semantic" GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" + GeneratedCodeInfo_Annotation_Semantic_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.semantic" ) // Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. @@ -826,4 +861,11 @@ const ( GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 + GeneratedCodeInfo_Annotation_Semantic_field_number protoreflect.FieldNumber = 5 +) + +// Full and short names for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. 
+const ( + GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" + GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 11a6128ba..185ef2efa 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -59,7 +59,6 @@ func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { default: return newSingularConverter(t, fd) } - panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) } var ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index fea589c45..61a84d341 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -87,7 +87,7 @@ func (sb *Builder) grow(n int) { // Unlike strings.Builder, we do not need to copy over the contents // of the old buffer since our builder provides no API for // retrieving previously created strings. - sb.buf = make([]byte, 2*(cap(sb.buf)+n)) + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) } func (sb *Builder) last(n int) string { diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index b480c5010..f7014cd51 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 28 - Patch = 1 + Minor = 30 + Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index 08d2a46f5..ec71e717f 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -5,16 +5,13 @@ // Package proto provides functions operating on protocol buffer messages. // // For documentation on protocol buffers in general, see: -// -// https://developers.google.com/protocol-buffers +// https://protobuf.dev. // // For a tutorial on using protocol buffers with Go, see: -// -// https://developers.google.com/protocol-buffers/docs/gotutorial +// https://protobuf.dev/getting-started/gotutorial. // // For a guide to generated Go protocol buffer code, see: -// -// https://developers.google.com/protocol-buffers/docs/reference/go-generated +// https://protobuf.dev/reference/go/go-generated. // // # Binary serialization // diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 67948dd1d..1a0be1b03 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -5,30 +5,39 @@ package proto import ( - "bytes" - "math" "reflect" - "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/reflect/protoreflect" ) -// Equal reports whether two messages are equal. -// If two messages marshal to the same bytes under deterministic serialization, -// then Equal is guaranteed to report true. +// Equal reports whether two messages are equal, +// by recursively comparing the fields of the message. 
// -// Two messages are equal if they belong to the same message descriptor, -// have the same set of populated known and extension field values, -// and the same set of unknown fields values. If either of the top-level -// messages are invalid, then Equal reports true only if both are invalid. +// - Bytes fields are equal if they contain identical bytes. +// Empty bytes (regardless of nil-ness) are considered equal. // -// Scalar values are compared with the equivalent of the == operator in Go, -// except bytes values which are compared using bytes.Equal and -// floating point values which specially treat NaNs as equal. -// Message values are compared by recursively calling Equal. -// Lists are equal if each element value is also equal. -// Maps are equal if they have the same set of keys, where the pair of values -// for each key is also equal. +// - Floating-point fields are equal if they contain the same value. +// Unlike the == operator, a NaN is equal to another NaN. +// +// - Other scalar fields are equal if they contain the same value. +// +// - Message fields are equal if they have +// the same set of populated known and extension field values, and +// the same set of unknown fields values. +// +// - Lists are equal if they are the same length and +// each corresponding element is equal. +// +// - Maps are equal if they have the same set of keys and +// the corresponding value for each key is equal. +// +// An invalid message is not equal to a valid message. +// An invalid message is only equal to another invalid message of the +// same type. An invalid message often corresponds to a nil pointer +// of the concrete message type. For example, (*pb.M)(nil) is not equal +// to &pb.M{}. +// If two valid messages marshal to the same bytes under deterministic +// serialization, then Equal is guaranteed to report true. func Equal(x, y Message) bool { if x == nil || y == nil { return x == nil && y == nil @@ -42,130 +51,7 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } - return equalMessage(mx, my) -} - -// equalMessage compares two messages. -func equalMessage(mx, my protoreflect.Message) bool { - if mx.Descriptor() != my.Descriptor() { - return false - } - - nx := 0 - equal := true - mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { - nx++ - vy := my.Get(fd) - equal = my.Has(fd) && equalField(fd, vx, vy) - return equal - }) - if !equal { - return false - } - ny := 0 - my.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { - ny++ - return true - }) - if nx != ny { - return false - } - - return equalUnknown(mx.GetUnknown(), my.GetUnknown()) -} - -// equalField compares two fields. -func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { - switch { - case fd.IsList(): - return equalList(fd, x.List(), y.List()) - case fd.IsMap(): - return equalMap(fd, x.Map(), y.Map()) - default: - return equalValue(fd, x, y) - } -} - -// equalMap compares two maps. -func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool { - if x.Len() != y.Len() { - return false - } - equal := true - x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { - vy := y.Get(k) - equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) - return equal - }) - return equal -} - -// equalList compares two lists. 
-func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool { - if x.Len() != y.Len() { - return false - } - for i := x.Len() - 1; i >= 0; i-- { - if !equalValue(fd, x.Get(i), y.Get(i)) { - return false - } - } - return true -} - -// equalValue compares two singular values. -func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { - switch fd.Kind() { - case protoreflect.BoolKind: - return x.Bool() == y.Bool() - case protoreflect.EnumKind: - return x.Enum() == y.Enum() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, - protoreflect.Int64Kind, protoreflect.Sint64Kind, - protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: - return x.Int() == y.Int() - case protoreflect.Uint32Kind, protoreflect.Uint64Kind, - protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: - return x.Uint() == y.Uint() - case protoreflect.FloatKind, protoreflect.DoubleKind: - fx := x.Float() - fy := y.Float() - if math.IsNaN(fx) || math.IsNaN(fy) { - return math.IsNaN(fx) && math.IsNaN(fy) - } - return fx == fy - case protoreflect.StringKind: - return x.String() == y.String() - case protoreflect.BytesKind: - return bytes.Equal(x.Bytes(), y.Bytes()) - case protoreflect.MessageKind, protoreflect.GroupKind: - return equalMessage(x.Message(), y.Message()) - default: - return x.Interface() == y.Interface() - } -} - -// equalUnknown compares unknown fields by direct comparison on the raw bytes -// of each individual field number. -func equalUnknown(x, y protoreflect.RawFields) bool { - if len(x) != len(y) { - return false - } - if bytes.Equal([]byte(x), []byte(y)) { - return true - } - - mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) - my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) - for len(x) > 0 { - fnum, _, n := protowire.ConsumeField(x) - mx[fnum] = append(mx[fnum], x[:n]...) - x = x[n:] - } - for len(y) > 0 { - fnum, _, n := protowire.ConsumeField(y) - my[fnum] = append(my[fnum], y[:n]...) 
- y = y[n:] - } - return reflect.DeepEqual(mx, my) + vx := protoreflect.ValueOfMessage(mx) + vy := protoreflect.ValueOfMessage(my) + return vx.Equal(vy) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index b03c1223c..54ce326df 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,6 +35,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) + case 13: + b = p.appendSingularField(b, "edition", nil) } return b } @@ -236,6 +238,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 7: b = p.appendSingularField(b, "map_entry", nil) + case 11: + b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -279,6 +283,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "allow_alias", nil) case 3: b = p.appendSingularField(b, "deprecated", nil) + case 6: + b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -345,10 +351,18 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "jstype", nil) case 5: b = p.appendSingularField(b, "lazy", nil) + case 15: + b = p.appendSingularField(b, "unverified_lazy", nil) case 3: b = p.appendSingularField(b, "deprecated", nil) case 10: b = p.appendSingularField(b, "weak", nil) + case 16: + b = p.appendSingularField(b, "debug_redact", nil) + case 17: + b = p.appendSingularField(b, "retention", nil) + case 18: + b = p.appendSingularField(b, "target", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index f31981077..37601b781 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -148,7 +148,7 @@ type Message interface { // be preserved in marshaling or other operations. IsValid() bool - // ProtoMethods returns optional fast-path implementions of various operations. + // ProtoMethods returns optional fast-path implementations of various operations. // This method may return nil. // // The returned methods type is identical to diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go new file mode 100644 index 000000000..591652541 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -0,0 +1,168 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "bytes" + "fmt" + "math" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" +) + +// Equal reports whether v1 and v2 are recursively equal. 
+// +// - Values of different types are always unequal. +// +// - Bytes values are equal if they contain identical bytes. +// Empty bytes (regardless of nil-ness) are considered equal. +// +// - Floating point values are equal if they contain the same value. +// Unlike the == operator, a NaN is equal to another NaN. +// +// - Enums are equal if they contain the same number. +// Since Value does not contain an enum descriptor, +// enum values do not consider the type of the enum. +// +// - Other scalar values are equal if they contain the same value. +// +// - Message values are equal if they belong to the same message descriptor, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// - Lists are equal if they are the same length and +// each corresponding element is equal. +// +// - Maps are equal if they have the same set of keys and +// the corresponding value for each key is equal. +func (v1 Value) Equal(v2 Value) bool { + return equalValue(v1, v2) +} + +func equalValue(x, y Value) bool { + eqType := x.typ == y.typ + switch x.typ { + case nilType: + return eqType + case boolType: + return eqType && x.Bool() == y.Bool() + case int32Type, int64Type: + return eqType && x.Int() == y.Int() + case uint32Type, uint64Type: + return eqType && x.Uint() == y.Uint() + case float32Type, float64Type: + return eqType && equalFloat(x.Float(), y.Float()) + case stringType: + return eqType && x.String() == y.String() + case bytesType: + return eqType && bytes.Equal(x.Bytes(), y.Bytes()) + case enumType: + return eqType && x.Enum() == y.Enum() + default: + switch x := x.Interface().(type) { + case Message: + y, ok := y.Interface().(Message) + return ok && equalMessage(x, y) + case List: + y, ok := y.Interface().(List) + return ok && equalList(x, y) + case Map: + y, ok := y.Interface().(Map) + return ok && equalMap(x, y) + default: + panic(fmt.Sprintf("unknown type: %T", x)) + } + } +} + +// equalFloat compares two floats, where NaNs are treated as equal. +func equalFloat(x, y float64) bool { + if math.IsNaN(x) || math.IsNaN(y) { + return math.IsNaN(x) && math.IsNaN(y) + } + return x == y +} + +// equalMessage compares two messages. +func equalMessage(mx, my Message) bool { + if mx.Descriptor() != my.Descriptor() { + return false + } + + nx := 0 + equal := true + mx.Range(func(fd FieldDescriptor, vx Value) bool { + nx++ + vy := my.Get(fd) + equal = my.Has(fd) && equalValue(vx, vy) + return equal + }) + if !equal { + return false + } + ny := 0 + my.Range(func(fd FieldDescriptor, vx Value) bool { + ny++ + return true + }) + if nx != ny { + return false + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +// equalList compares two lists. +func equalList(x, y List) bool { + if x.Len() != y.Len() { + return false + } + for i := x.Len() - 1; i >= 0; i-- { + if !equalValue(x.Get(i), y.Get(i)) { + return false + } + } + return true +} + +// equalMap compares two maps. +func equalMap(x, y Map) bool { + if x.Len() != y.Len() { + return false + } + equal := true + x.Range(func(k MapKey, vx Value) bool { + vy := y.Get(k) + equal = y.Has(k) && equalValue(vx, vy) + return equal + }) + return equal +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. 
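The equality semantics documented above (proto.Equal now delegates to protoreflect.Value.Equal) are easiest to see with the well-known wrapper types. A small sketch of the documented corner cases, not part of the vendored diff:

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// NaN compares equal to NaN, unlike the Go == operator.
	fmt.Println(proto.Equal(wrapperspb.Double(math.NaN()), wrapperspb.Double(math.NaN()))) // true

	// An invalid (typed nil) message is not equal to an empty valid message...
	fmt.Println(proto.Equal((*wrapperspb.DoubleValue)(nil), &wrapperspb.DoubleValue{})) // false
	// ...but two invalid messages of the same type are equal.
	fmt.Println(proto.Equal((*wrapperspb.DoubleValue)(nil), (*wrapperspb.DoubleValue)(nil))) // true
}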
+func equalUnknown(x, y RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[FieldNumber]RawFields) + my := make(map[FieldNumber]RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + return reflect.DeepEqual(mx, my) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index ca8e28c5b..08e5ef73f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -54,11 +54,11 @@ import ( // // Append a 0 to a "repeated int32" field. // // Since the Value returned by Mutable is guaranteed to alias // // the source message, modifying the Value modifies the message. -// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) +// message.Mutable(fieldDesc).List().Append(protoreflect.ValueOfInt32(0)) // // // Assign [0] to a "repeated int32" field by creating a new Value, // // modifying it, and assigning it. -// list := message.NewField(fieldDesc).(List) +// list := message.NewField(fieldDesc).List() // list.Append(protoreflect.ValueOfInt32(0)) // message.Set(fieldDesc, list) // // ERROR: Since it is not defined whether Set aliases the source, diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 58352a697..aeb559774 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -46,7 +46,7 @@ var conflictPolicy = "panic" // "panic" | "warn" | "ignore" // It is a variable so that the behavior is easily overridden in another file. var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" - const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" + const faq = "https://protobuf.dev/reference/go/faq#namespace-conflict" policy := conflictPolicy if v := os.Getenv(env); v != "" { policy = v diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index abe4ab511..dac5671db 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -406,6 +406,152 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} } +// If set to RETENTION_SOURCE, the option will be omitted from the binary. +// Note: as of January 2023, support for this is in progress and does not yet +// have an effect (b/264593489). +type FieldOptions_OptionRetention int32 + +const ( + FieldOptions_RETENTION_UNKNOWN FieldOptions_OptionRetention = 0 + FieldOptions_RETENTION_RUNTIME FieldOptions_OptionRetention = 1 + FieldOptions_RETENTION_SOURCE FieldOptions_OptionRetention = 2 +) + +// Enum value maps for FieldOptions_OptionRetention. 
+var ( + FieldOptions_OptionRetention_name = map[int32]string{ + 0: "RETENTION_UNKNOWN", + 1: "RETENTION_RUNTIME", + 2: "RETENTION_SOURCE", + } + FieldOptions_OptionRetention_value = map[string]int32{ + "RETENTION_UNKNOWN": 0, + "RETENTION_RUNTIME": 1, + "RETENTION_SOURCE": 2, + } +) + +func (x FieldOptions_OptionRetention) Enum() *FieldOptions_OptionRetention { + p := new(FieldOptions_OptionRetention) + *p = x + return p +} + +func (x FieldOptions_OptionRetention) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() +} + +func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[5] +} + +func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_OptionRetention) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_OptionRetention(num) + return nil +} + +// Deprecated: Use FieldOptions_OptionRetention.Descriptor instead. +func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 2} +} + +// This indicates the types of entities that the field may apply to when used +// as an option. If it is unset, then the field may be freely used as an +// option on any kind of entity. Note: as of January 2023, support for this is +// in progress and does not yet have an effect (b/264593489). +type FieldOptions_OptionTargetType int32 + +const ( + FieldOptions_TARGET_TYPE_UNKNOWN FieldOptions_OptionTargetType = 0 + FieldOptions_TARGET_TYPE_FILE FieldOptions_OptionTargetType = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE FieldOptions_OptionTargetType = 2 + FieldOptions_TARGET_TYPE_MESSAGE FieldOptions_OptionTargetType = 3 + FieldOptions_TARGET_TYPE_FIELD FieldOptions_OptionTargetType = 4 + FieldOptions_TARGET_TYPE_ONEOF FieldOptions_OptionTargetType = 5 + FieldOptions_TARGET_TYPE_ENUM FieldOptions_OptionTargetType = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY FieldOptions_OptionTargetType = 7 + FieldOptions_TARGET_TYPE_SERVICE FieldOptions_OptionTargetType = 8 + FieldOptions_TARGET_TYPE_METHOD FieldOptions_OptionTargetType = 9 +) + +// Enum value maps for FieldOptions_OptionTargetType. 
+var ( + FieldOptions_OptionTargetType_name = map[int32]string{ + 0: "TARGET_TYPE_UNKNOWN", + 1: "TARGET_TYPE_FILE", + 2: "TARGET_TYPE_EXTENSION_RANGE", + 3: "TARGET_TYPE_MESSAGE", + 4: "TARGET_TYPE_FIELD", + 5: "TARGET_TYPE_ONEOF", + 6: "TARGET_TYPE_ENUM", + 7: "TARGET_TYPE_ENUM_ENTRY", + 8: "TARGET_TYPE_SERVICE", + 9: "TARGET_TYPE_METHOD", + } + FieldOptions_OptionTargetType_value = map[string]int32{ + "TARGET_TYPE_UNKNOWN": 0, + "TARGET_TYPE_FILE": 1, + "TARGET_TYPE_EXTENSION_RANGE": 2, + "TARGET_TYPE_MESSAGE": 3, + "TARGET_TYPE_FIELD": 4, + "TARGET_TYPE_ONEOF": 5, + "TARGET_TYPE_ENUM": 6, + "TARGET_TYPE_ENUM_ENTRY": 7, + "TARGET_TYPE_SERVICE": 8, + "TARGET_TYPE_METHOD": 9, + } +) + +func (x FieldOptions_OptionTargetType) Enum() *FieldOptions_OptionTargetType { + p := new(FieldOptions_OptionTargetType) + *p = x + return p +} + +func (x FieldOptions_OptionTargetType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() +} + +func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[6] +} + +func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_OptionTargetType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_OptionTargetType(num) + return nil +} + +// Deprecated: Use FieldOptions_OptionTargetType.Descriptor instead. +func (FieldOptions_OptionTargetType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 3} +} + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, // or neither? HTTP based RPC implementation may choose GET verb for safe // methods, and PUT verb for idempotent methods instead of the default POST. @@ -442,11 +588,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -468,6 +614,70 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} } +// Represents the identified object's effect on the element in the original +// .proto file. +type GeneratedCodeInfo_Annotation_Semantic int32 + +const ( + // There is no effect or the effect is indescribable. + GeneratedCodeInfo_Annotation_NONE GeneratedCodeInfo_Annotation_Semantic = 0 + // The element is set or otherwise mutated. + GeneratedCodeInfo_Annotation_SET GeneratedCodeInfo_Annotation_Semantic = 1 + // An alias to the element is returned. + GeneratedCodeInfo_Annotation_ALIAS GeneratedCodeInfo_Annotation_Semantic = 2 +) + +// Enum value maps for GeneratedCodeInfo_Annotation_Semantic. 
+var ( + GeneratedCodeInfo_Annotation_Semantic_name = map[int32]string{ + 0: "NONE", + 1: "SET", + 2: "ALIAS", + } + GeneratedCodeInfo_Annotation_Semantic_value = map[string]int32{ + "NONE": 0, + "SET": 1, + "ALIAS": 2, + } +) + +func (x GeneratedCodeInfo_Annotation_Semantic) Enum() *GeneratedCodeInfo_Annotation_Semantic { + p := new(GeneratedCodeInfo_Annotation_Semantic) + *p = x + return p +} + +func (x GeneratedCodeInfo_Annotation_Semantic) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() +} + +func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[8] +} + +func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GeneratedCodeInfo_Annotation_Semantic(num) + return nil +} + +// Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. +func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} +} + // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. type FileDescriptorSet struct { @@ -544,8 +754,12 @@ type FileDescriptorProto struct { // development tools. SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` // The syntax of the proto file. - // The supported values are "proto2" and "proto3". + // The supported values are "proto2", "proto3", and "editions". + // + // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + // The edition of the proto file, which is an opaque string. + Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -664,6 +878,13 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } +func (x *FileDescriptorProto) GetEdition() string { + if x != nil && x.Edition != nil { + return *x.Edition + } + return "" +} + // Describes a message type. type DescriptorProto struct { state protoimpl.MessageState @@ -860,7 +1081,6 @@ type FieldDescriptorProto struct { // For booleans, "true" or "false". // For strings, contains the default text contents (not escaped in any way). // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` // If set, gives the index of a oneof in the containing type's oneof_decl // list. This field is a member of that oneof. @@ -1382,22 +1602,22 @@ type FileOptions struct { // inappropriate because proto packages do not normally start with backwards // domain names. JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. 
This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). + // Controls the name of the wrapper Java class generated for the .proto file. + // That class will always contain the .proto file's getDescriptor() method as + // well as any top-level extensions defined in the .proto file. + // If java_multiple_files is disabled, then all the other classes from the + // .proto file will be nested inside the single wrapper outer class. JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java + // If enabled, then the Java code generator will generate a separate .java // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be + // file. Thus, these types will *not* be nested inside the wrapper class + // named by java_outer_classname. However, the wrapper class will still be // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` // This option does nothing. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 @@ -1531,7 +1751,7 @@ func (x *FileOptions) GetJavaMultipleFiles() bool { return Default_FileOptions_JavaMultipleFiles } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { if x != nil && x.JavaGenerateEqualsAndHash != nil { return *x.JavaGenerateEqualsAndHash @@ -1670,10 +1890,12 @@ type MessageOptions struct { // efficient, has fewer features, and is more complicated. // // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } + // + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // // Note that the message cannot have any defined fields; MessageSets only // have extensions. // @@ -1692,28 +1914,44 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + // // Whether the message is an automatically generated map entry type for the // maps field. 
// // For maps fields: - // map map_field = 1; + // + // map map_field = 1; + // // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; + // + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; // // Implementations may choose not to generate the map_entry=true message, but // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscored from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // + // This should only be used as a temporary measure against broken builds due + // to the change in behavior for JSON field name conflicts. + // + // TODO(b/261750190) This is legacy behavior we plan to remove once downstream + // teams have had time to migrate. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -1785,6 +2023,14 @@ func (x *MessageOptions) GetMapEntry() bool { return false } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { + if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { + return *x.DeprecatedLegacyJsonFieldConflicts + } + return false +} + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -1838,7 +2084,6 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // // Note that implementations may choose not to check required fields within // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. @@ -1849,7 +2094,14 @@ type FieldOptions struct { // implementation must either *always* check its required fields, or *never* // check its required fields, regardless of whether or not the message has // been parsed. + // + // As of May 2022, lazy verifies the contents of the byte stream during + // parsing. An invalid byte stream will cause the overall parsing to fail. Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // unverified_lazy does no correctness checks on the byte stream. This should + // only be used where lazy with verification is prohibitive for performance + // reasons. 
+ UnverifiedLazy *bool `protobuf:"varint,15,opt,name=unverified_lazy,json=unverifiedLazy,def=0" json:"unverified_lazy,omitempty"` // Is this field deprecated? // Depending on the target platform, this can emit Deprecated annotations // for accessors, or it will be completely ignored; in the very least, this @@ -1857,17 +2109,24 @@ type FieldOptions struct { Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // For Google-internal migration only. Do not use. Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // Indicate that the field value should not be printed out when using debug + // formats, e.g. when the field contains sensitive credentials. + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for FieldOptions fields. const ( - Default_FieldOptions_Ctype = FieldOptions_STRING - Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL - Default_FieldOptions_Lazy = bool(false) - Default_FieldOptions_Deprecated = bool(false) - Default_FieldOptions_Weak = bool(false) + Default_FieldOptions_Ctype = FieldOptions_STRING + Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL + Default_FieldOptions_Lazy = bool(false) + Default_FieldOptions_UnverifiedLazy = bool(false) + Default_FieldOptions_Deprecated = bool(false) + Default_FieldOptions_Weak = bool(false) + Default_FieldOptions_DebugRedact = bool(false) ) func (x *FieldOptions) Reset() { @@ -1930,6 +2189,13 @@ func (x *FieldOptions) GetLazy() bool { return Default_FieldOptions_Lazy } +func (x *FieldOptions) GetUnverifiedLazy() bool { + if x != nil && x.UnverifiedLazy != nil { + return *x.UnverifiedLazy + } + return Default_FieldOptions_UnverifiedLazy +} + func (x *FieldOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -1944,6 +2210,27 @@ func (x *FieldOptions) GetWeak() bool { return Default_FieldOptions_Weak } +func (x *FieldOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_FieldOptions_DebugRedact +} + +func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { + if x != nil && x.Retention != nil { + return *x.Retention + } + return FieldOptions_RETENTION_UNKNOWN +} + +func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { + if x != nil && x.Target != nil { + return *x.Target + } + return FieldOptions_TARGET_TYPE_UNKNOWN +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2014,6 +2301,15 @@ type EnumOptions struct { // for the enum, or it will be completely ignored; in the very least, this // is a formalization for deprecating enums. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enable the legacy handling of JSON field name conflicts. 
This lowercases + // and strips underscored from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // TODO(b/261750190) Remove this legacy behavior once downstream teams have + // had time to migrate. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2069,6 +2365,14 @@ func (x *EnumOptions) GetDeprecated() bool { return Default_EnumOptions_Deprecated } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { + if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { + return *x.DeprecatedLegacyJsonFieldConflicts + } + return false +} + func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2399,43 +2703,48 @@ type SourceCodeInfo struct { // tools. // // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } + // + // message Foo { + // optional string foo = 1; + // } + // // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi + // + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). // // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendant. For example, a "group" defines - // both a type and a field in a single declaration. 
Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` } @@ -2715,8 +3024,8 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". +// E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents +// "foo.(bar.baz).moo". type UninterpretedOption_NamePart struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2781,23 +3090,34 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] + // the root FileDescriptorProto to the place where the definition occurs. + // For example, this path: + // + // [ 4, 3, 2, 7, 1 ] + // // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 + // + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; + // + // repeated DescriptorProto message_type = 4; + // // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; + // + // repeated FieldDescriptorProto field = 2; + // // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; + // + // optional string name = 1; // // Thus, the above path gives the location of a field name. 
If we removed // the last element: - // [ 4, 3, 2, 7 ] + // + // [ 4, 3, 2, 7 ] + // // this path refers to the whole field declaration (from the beginning // of the label to the terminating semicolon). Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` @@ -2826,34 +3146,34 @@ type SourceCodeInfo_Location struct { // // Examples: // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; + // // Comment attached to moo. + // // + // // Another line attached to moo. + // optional double moo = 4; // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. + // // Detached comment for corge. This is not leading or trailing comments + // // to moo or corge because there are blank lines separating it from + // // both. // - // // Detached comment for corge paragraph 2. + // // Detached comment for corge paragraph 2. // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; // - // // ignored detached comments. + // // ignored detached comments. LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` @@ -2940,9 +3260,10 @@ type GeneratedCodeInfo_Annotation struct { // that relates to the identified object. Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past + // relates to the identified object. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` } func (x *GeneratedCodeInfo_Annotation) Reset() { @@ -3005,6 +3326,13 @@ func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { return 0 } +func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotation_Semantic { + if x != nil && x.Semantic != nil { + return *x.Semantic + } + return GeneratedCodeInfo_Annotation_NONE +} + var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor var file_google_protobuf_descriptor_proto_rawDesc = []byte{ @@ -3016,7 +3344,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -3054,330 +3382,391 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, - 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, - 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 
0x6e, 0x75, 0x6d, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, + 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, - 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 
0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, - 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, - 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, - 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, - 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, - 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, - 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, - 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, - 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 
0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, - 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, - 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, - 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, - 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, - 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, - 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, - 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 
0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, - 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 
0x67, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, - 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, - 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, - 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, - 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, - 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, - 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, - 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 
0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, + 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, + 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, + 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, + 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, + 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, + 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, + 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 
0x34, 0x10, 0x04, 0x12, 0x0e, + 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, + 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, + 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, + 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, + 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, + 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, + 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, + 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, + 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, + 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, + 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, + 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, + 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, + 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 
0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, + 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, + 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, + 0x70, 0x75, 0x74, 
0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, + 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, + 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, + 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, + 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, + 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, + 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, + 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, + 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, + 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 
0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, - 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, - 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, - 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, - 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, - 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, - 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, - 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 
0x54, 0x49, 0x4d, 0x45, 0x10, - 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, - 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, - 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, + 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, + 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, + 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, + 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0f, + 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, + 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, + 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, + 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, + 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, + 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, - 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, + 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, + 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, 0x03, 0x0a, + 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, + 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x57, 
0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, + 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, + 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xb7, 0x08, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, - 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 
0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, - 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, - 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, - 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, - 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, - 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, - 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, - 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, - 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, - 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, - 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 
0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x12, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, - 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, - 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, + 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 
0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, + 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, + 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 
0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, @@ -3385,97 +3774,95 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 
0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, + 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, + 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, + 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, + 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, + 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, + 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 
0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, - 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, - 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, - 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, - 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, - 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, - 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 
0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, - 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, - 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, - 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, - 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, - 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, - 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, + 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, + 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 
0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, + 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, + 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, + 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, + 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, + 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, + 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, @@ -3498,7 +3885,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 
9) var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type @@ -3506,84 +3893,90 @@ var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType - (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel - (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 16: google.protobuf.FileOptions - (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 20: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange - (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation + (FieldOptions_OptionRetention)(0), // 5: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 6: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 7: google.protobuf.MethodOptions.IdempotencyLevel + (GeneratedCodeInfo_Annotation_Semantic)(0), // 8: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 9: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 10: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 11: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 12: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 13: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 14: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 15: google.protobuf.EnumDescriptorProto + 
(*EnumValueDescriptorProto)(nil), // 16: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 17: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 18: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 19: google.protobuf.FileOptions + (*MessageOptions)(nil), // 20: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 21: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 22: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 23: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 24: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 25: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 26: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 27: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 28: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 29: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 30: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 31: google.protobuf.DescriptorProto.ReservedRange + (*EnumDescriptorProto_EnumReservedRange)(nil), // 32: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 33: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 34: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 35: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 10, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 11, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 15, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> 
google.protobuf.EnumDescriptorProto + 17, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 13, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 19, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 28, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 13, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 13, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 11, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 15, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 30, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 14, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 20, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 31, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 27, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 21, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 15, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 21, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 22, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 16, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 23, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 32, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 24, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 18, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 25, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 26, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> 
google.protobuf.MethodOptions 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 43, // [43:43] is the sub-list for method output_type - 43, // [43:43] is the sub-list for method input_type - 43, // [43:43] is the sub-list for extension type_name - 43, // [43:43] is the sub-list for extension extendee - 0, // [0:43] is the sub-list for field type_name + 5, // 32: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 6, // 33: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType + 27, // 34: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 35: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 36: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 37: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 38: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 7, // 39: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 27, // 40: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 33, // 41: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 34, // 42: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 35, // 43: 
google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 12, // 44: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 8, // 45: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 46, // [46:46] is the sub-list for method output_type + 46, // [46:46] is the sub-list for method input_type + 46, // [46:46] is the sub-list for extension type_name + 46, // [46:46] is the sub-list for extension extendee + 0, // [0:46] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -3940,7 +4333,7 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 6, + NumEnums: 9, NumMessages: 27, NumExtensions: 0, NumServices: 0, diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 8c10797b9..a6c7a33f3 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -37,8 +37,7 @@ // It is functionally a tuple of the full name of the remote message type and // the serialized bytes of the remote message value. // -// -// Constructing an Any +// # Constructing an Any // // An Any message containing another message value is constructed using New: // @@ -48,8 +47,7 @@ // } // ... // make use of any // -// -// Unmarshaling an Any +// # Unmarshaling an Any // // With a populated Any message, the underlying message can be serialized into // a remote concrete message value in a few ways. @@ -95,8 +93,7 @@ // listed in the case clauses are linked into the Go binary and therefore also // registered in the global registry. // -// -// Type checking an Any +// # Type checking an Any // // In order to type check whether an Any message represents some other message, // then use the MessageIs method: @@ -115,7 +112,6 @@ // } // ... // make use of m // } -// package anypb import ( @@ -136,45 +132,49 @@ import ( // // Example 1: Pack and unpack a message in C++. // -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } // // Example 2: Pack and unpack a message in Java. // -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := anypb.New(foo) -// if err != nil { -// ... -// } -// ... -// foo := &pb.Foo{} -// if err := any.UnmarshalTo(foo); err != nil { -// ... -// } +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// // or ... +// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +// foo = any.unpack(Foo.getDefaultInstance()); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... 
+// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... +// } // // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack @@ -182,35 +182,33 @@ import ( // in the type URL, for example "foo.bar.com/x/y.z" will yield type // name "y.z". // +// # JSON // -// JSON -// ==== // The JSON representation of an `Any` value uses the regular // representation of the deserialized, embedded message, with an // additional field `@type` which contains the type URL. Example: // -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } // -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } // // If the embedded message type is well-known and has a custom JSON // representation, that representation will be embedded adding a field // `value` which holds the custom JSON in addition to the `@type` // field. Example (for message [google.protobuf.Duration][]): // -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } type Any struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -228,14 +226,14 @@ type Any struct { // scheme `http`, `https`, or no scheme, one can optionally set up a type // server that maps type URLs to message definitions as follows: // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) + // - If no scheme is provided, `https` is assumed. + // - An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // - Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) // // Note: this functionality is not currently available in the official // protobuf release, and it is not used for type URLs beginning with @@ -243,7 +241,6 @@ type Any struct { // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. - // TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. 
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index a583ca2f6..df709a8dd 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -35,8 +35,7 @@ // // The Duration message represents a signed span of time. // -// -// Conversion to a Go Duration +// # Conversion to a Go Duration // // The AsDuration method can be used to convert a Duration message to a // standard Go time.Duration value: @@ -65,15 +64,13 @@ // the resulting value to the closest representable value (e.g., math.MaxInt64 // for positive overflow and math.MinInt64 for negative overflow). // -// -// Conversion from a Go Duration +// # Conversion from a Go Duration // // The durationpb.New function can be used to construct a Duration message // from a standard Go time.Duration value: // // dur := durationpb.New(d) // ... // make use of d as a *durationpb.Duration -// package durationpb import ( @@ -96,43 +93,43 @@ import ( // // Example 1: Compute Duration from two Timestamps in pseudo code. // -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; // -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; // -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (duration.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } // // Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. // -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; // -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; // -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } // // Example 3: Compute Duration from datetime.timedelta in Python. // -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) // // # JSON Mapping // @@ -143,8 +140,6 @@ import ( // encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should // be expressed in JSON format as "3.000000001s", and 3 seconds and 1 // microsecond should be expressed in JSON format as "3.000001s". 
-// -// type Duration struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index c9ae92132..61f69fc11 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -36,8 +36,7 @@ // The Timestamp message represents a timestamp, // an instant in time since the Unix epoch (January 1st, 1970). // -// -// Conversion to a Go Time +// # Conversion to a Go Time // // The AsTime method can be used to convert a Timestamp message to a // standard Go time.Time value in UTC: @@ -59,8 +58,7 @@ // ... // handle error // } // -// -// Conversion from a Go Time +// # Conversion from a Go Time // // The timestamppb.New function can be used to construct a Timestamp message // from a standard Go time.Time value: @@ -72,7 +70,6 @@ // // ts := timestamppb.Now() // ... // make use of ts as a *timestamppb.Timestamp -// package timestamppb import ( @@ -101,52 +98,50 @@ import ( // // Example 1: Compute Timestamp from POSIX `time()`. // -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); // // Example 2: Compute Timestamp from POSIX `gettimeofday()`. // -// struct timeval tv; -// gettimeofday(&tv, NULL); +// struct timeval tv; +// gettimeofday(&tv, NULL); // -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); // // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. // -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; // -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); // // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. // -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// long millis = System.currentTimeMillis(); // +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); // // Example 5: Compute Timestamp from Java `Instant.now()`. 
// -// Instant now = Instant.now(); -// -// Timestamp timestamp = -// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) -// .setNanos(now.getNano()).build(); +// Instant now = Instant.now(); // +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); // // Example 6: Compute Timestamp from current time in Python. // -// timestamp = Timestamp() -// timestamp.GetCurrentTime() +// timestamp = Timestamp() +// timestamp.GetCurrentTime() // // # JSON Mapping // @@ -174,8 +169,6 @@ import ( // the Joda Time's [`ISODateTimeFormat.dateTime()`]( // http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D // ) to obtain a formatter capable of generating timestamps in this format. -// -// type Timestamp struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/modules.txt b/vendor/modules.txt index e818c40f0..c9c2de287 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,8 +1,11 @@ # 4d63.com/gochecknoglobals v0.1.0 ## explicit; go 1.15 4d63.com/gochecknoglobals/checknoglobals -# cloud.google.com/go/compute v1.7.0 -## explicit; go 1.15 +# cloud.google.com/go/compute v1.19.1 +## explicit; go 1.19 +cloud.google.com/go/compute/internal +# cloud.google.com/go/compute/metadata v0.2.3 +## explicit; go 1.19 cloud.google.com/go/compute/metadata # github.com/AlecAivazis/survey/v2 v2.3.5 ## explicit; go 1.13 @@ -353,7 +356,7 @@ github.com/breml/errchkjson github.com/butuzov/ireturn/analyzer github.com/butuzov/ireturn/config github.com/butuzov/ireturn/types -# github.com/cespare/xxhash/v2 v2.1.2 +# github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 # github.com/charithe/durationcheck v0.0.9 @@ -603,8 +606,9 @@ github.com/golang/groupcache/lru github.com/golang/mock/gomock github.com/golang/mock/mockgen github.com/golang/mock/mockgen/model -# github.com/golang/protobuf v1.5.2 +# github.com/golang/protobuf v1.5.3 ## explicit; go 1.9 +github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any @@ -679,7 +683,7 @@ github.com/google/gnostic/extensions github.com/google/gnostic/jsonschema github.com/google/gnostic/openapiv2 github.com/google/gnostic/openapiv3 -# github.com/google/go-cmp v0.5.8 +# github.com/google/go-cmp v0.5.9 ## explicit; go 1.13 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff @@ -696,12 +700,12 @@ github.com/google/shlex # github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.1.0 -## explicit; go 1.18 +# github.com/googleapis/enterprise-certificate-proxy v0.2.3 +## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.4.0 -## explicit; go 1.15 +# github.com/googleapis/gax-go/v2 v2.7.1 +## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto @@ -1271,7 +1275,7 @@ github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/testutil/promlint -# github.com/prometheus/client_model v0.2.0 +# github.com/prometheus/client_model v0.3.0 ## explicit; go 1.9 
github.com/prometheus/client_model/go # github.com/prometheus/common v0.37.0 @@ -1360,9 +1364,10 @@ github.com/sonatard/noctx/reqwithoutctx # github.com/sourcegraph/go-diff v0.6.1 ## explicit; go 1.14 github.com/sourcegraph/go-diff/diff -# github.com/spf13/afero v1.8.2 -## explicit; go 1.13 +# github.com/spf13/afero v1.9.2 +## explicit; go 1.16 github.com/spf13/afero +github.com/spf13/afero/internal/common github.com/spf13/afero/mem # github.com/spf13/cast v1.5.0 ## explicit; go 1.18 @@ -1385,11 +1390,11 @@ github.com/ssgreg/nlreturn/v2/pkg/nlreturn # github.com/stbenjam/no-sprintf-host-port v0.1.1 ## explicit; go 1.16 github.com/stbenjam/no-sprintf-host-port/pkg/analyzer -# github.com/stretchr/objx v0.4.0 +# github.com/stretchr/objx v0.5.0 ## explicit; go 1.12 github.com/stretchr/objx -# github.com/stretchr/testify v1.8.0 -## explicit; go 1.13 +# github.com/stretchr/testify v1.8.3 +## explicit; go 1.20 github.com/stretchr/testify/assert github.com/stretchr/testify/mock # github.com/subosito/gotenv v1.4.0 @@ -1480,7 +1485,7 @@ go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/x/bsonx/bsoncore -# go.opencensus.io v0.23.0 +# go.opencensus.io v0.24.0 ## explicit; go 1.13 go.opencensus.io go.opencensus.io/internal @@ -1530,14 +1535,14 @@ golang.org/x/crypto/sha3 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/terminal -# golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e +# golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 ## explicit; go 1.18 golang.org/x/exp/constraints golang.org/x/exp/slices # golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d ## explicit; go 1.18 golang.org/x/exp/typeparams -# golang.org/x/mod v0.8.0 +# golang.org/x/mod v0.9.0 ## explicit; go 1.17 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile @@ -1546,7 +1551,6 @@ golang.org/x/mod/semver # golang.org/x/net v0.17.0 ## explicit; go 1.17 golang.org/x/net/context -golang.org/x/net/context/ctxhttp golang.org/x/net/html golang.org/x/net/html/atom golang.org/x/net/html/charset @@ -1557,8 +1561,8 @@ golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 -## explicit; go 1.15 +# golang.org/x/oauth2 v0.7.0 +## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler golang.org/x/oauth2/google @@ -1603,10 +1607,10 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.0.0-20220609170525-579cf78fd858 +# golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.6.0 +# golang.org/x/tools v0.7.0 ## explicit; go 1.18 golang.org/x/tools/cmd/goimports golang.org/x/tools/cover @@ -1684,24 +1688,23 @@ golang.org/x/tools/internal/typesinternal # gomodules.xyz/jsonpatch/v2 v2.2.0 ## explicit; go 1.12 gomodules.xyz/jsonpatch/v2 -# google.golang.org/api v0.91.0 -## explicit; go 1.15 +# google.golang.org/api v0.114.0 +## explicit; go 1.19 google.golang.org/api/cloudresourcemanager/v1 google.golang.org/api/compute/v1 google.golang.org/api/dns/v1 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport google.golang.org/api/internal +google.golang.org/api/internal/cert google.golang.org/api/internal/gensupport google.golang.org/api/internal/impersonate google.golang.org/api/internal/third_party/uritemplates 
google.golang.org/api/option google.golang.org/api/option/internaloption google.golang.org/api/serviceusage/v1 -google.golang.org/api/transport/cert google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -google.golang.org/api/transport/internal/dca # google.golang.org/appengine v1.6.7 ## explicit; go 1.11 google.golang.org/appengine @@ -1714,13 +1717,13 @@ google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20220808131553-a91ffa7f803e -## explicit; go 1.18 +# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 +## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.48.0 => google.golang.org/grpc v1.40.0 -## explicit; go 1.11 +# google.golang.org/grpc v1.56.3 => google.golang.org/grpc v1.56.3 +## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -1729,14 +1732,17 @@ google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/channelz google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials +google.golang.org/grpc/credentials/insecure google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancer/gracefulswitch google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/buffer @@ -1748,6 +1754,7 @@ google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough @@ -1765,7 +1772,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.28.1 +# google.golang.org/protobuf v1.30.0 ## explicit; go 1.11 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -2346,7 +2353,7 @@ sigs.k8s.io/yaml # github.com/uber/athenadriver => github.com/uber/athenadriver v1.1.10 # github.com/willf/bitset => github.com/bits-and-blooms/bitset v1.2.1 # google.golang.org/cloud => cloud.google.com/go v0.97.0 -# google.golang.org/grpc => google.golang.org/grpc v1.40.0 +# google.golang.org/grpc => google.golang.org/grpc v1.56.3 # k8s.io/klog/v2 => k8s.io/klog/v2 v2.70.1 # k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 # k8s.io/kube-state-metrics => k8s.io/kube-state-metrics v1.9.7
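
Note: the vendored google.golang.org/protobuf hunks above only reflow the documentation comments of the well-known types (anypb, durationpb, timestamppb); the helpers those comments describe are unchanged. The sketch below is illustrative only and not part of the patch; it assumes nothing beyond anypb.New/MessageIs/UnmarshalTo, durationpb.New/AsDuration, and timestamppb.Now/AsTime, and packs a Duration into an Any purely to keep the example self-contained.

// Illustrative sketch (not part of the patch): exercising the well-known-type
// helpers whose vendored documentation is reflowed above.
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// durationpb: convert a Go time.Duration to a Duration message and back.
	dur := durationpb.New(3*time.Second + time.Microsecond)
	fmt.Println(dur.AsDuration()) // 3.000001s, matching the JSON form described above

	// timestamppb: capture the current instant and convert it to a Go time.Time.
	ts := timestamppb.Now()
	fmt.Println(ts.AsTime().UTC().Format(time.RFC3339Nano))

	// anypb: pack a message into an Any, type-check it, and unpack it again.
	// A Duration is the payload here only to keep the sketch self-contained;
	// any proto.Message can be packed the same way.
	packed, err := anypb.New(dur)
	if err != nil {
		panic(err)
	}
	if packed.MessageIs(&durationpb.Duration{}) {
		out := &durationpb.Duration{}
		if err := packed.UnmarshalTo(out); err != nil {
			panic(err)
		}
		fmt.Println(out.AsDuration()) // 3.000001s again
	}
}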